Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:50:26 UTC

[01/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Updated Branches:
  refs/heads/trunk 37f11ebda -> 43f14b349


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
new file mode 100644
index 0000000..9fbcd0e
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHbaseRegionServer(RMFTestCase):
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "configure",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+    
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "start",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
+      user = 'hbase'
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "stop",
+                   config_file="default.json"
+    )
+    
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver && rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_configure_secured(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "configure",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+    
+  def test_start_secured(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "start",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_secured(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "stop",
+                   config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver && rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )   
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )  
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )   
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )    
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+  
+  
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_regionserver_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
\ No newline at end of file
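
The start assertions above pin down the idempotence guard these scripts rely on: the daemon command runs only when the not_if check fails, i.e. when no live process is recorded in the pid file. A minimal sketch of that guard semantics, using a hypothetical guard_passes helper rather than the real resource_management Execute resource (which is not part of this diff):

import subprocess

def guard_passes(not_if_cmd):
    # not_if semantics: exit status 0 means "already running",
    # so the guarded action is skipped.
    return subprocess.call(not_if_cmd, shell=True) == 0

pid_check = ('ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 '
             '&& ps `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1')
if not guard_passes(pid_check):
    subprocess.call('/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf '
                    'start regionserver', shell=True)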


[08/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py
deleted file mode 100644
index dac198a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import subprocess
-import json
-
-RESOURCEMANAGER = 'rm'
-NODEMANAGER = 'nm'
-HISTORYSERVER = 'hs'
-
-STARTED_STATE = 'STARTED'
-RUNNING_STATE = 'RUNNING'
-
-# Return the response for the given path and address
-def getResponse(path, address, ssl_enabled):
-
-  command = "curl"
-  httpGssnegotiate = "--negotiate"
-  userpswd = "-u:"
-  insecure = "-k"# This is smoke test, no need to check CA of server
-  if ssl_enabled:
-    url = 'https://' + address + path
-  else:
-    url = 'http://' + address + path
-      
-  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
-  try:
-    proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    (stdout, stderr) = proc.communicate()
-    response = json.loads(stdout)
-    if response is None:
-      print 'There is no response for url: ' + str(url)
-      exit(1)
-    return response
-  except Exception as e:
-    print 'Error getting response for url: ' + str(url), e
-    exit(1)
-
-# Verify that the REST API is available for the given component
-def validateAvailability(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAvailabilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking availability status of component', e
-    exit(1)
-
-# Validate the component-specific response
-def validateAvailabilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      rm_state = response['clusterInfo']['state']
-      if rm_state == STARTED_STATE:
-        return True
-      else:
-        print 'Resourcemanager is not started'
-        return False
-
-    elif component == NODEMANAGER:
-      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
-      if node_healthy:
-        return True
-      else:
-        return False
-    elif component == HISTORYSERVER:
-      hs_start_time = response['historyInfo']['startedOn']
-      if hs_start_time > 0:
-        return True
-      else:
-        return False
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating availability response for ' + str(component), e
-    return False
-
-# Verify that the component has the resources it needs to work
-def validateAbility(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAbilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking ability of component', e
-    exit(1)
-
-# Validate that the component-specific response shows it has the resources it needs
-def validateAbilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      nodes = []
-      if 'nodes' in response and response['nodes'] is not None and 'node' in response['nodes']:
-        nodes = response['nodes']['node']
-      connected_nodes_count = len(nodes)
-      if connected_nodes_count == 0:
-        print 'There are no nodemanagers connected to the resourcemanager'
-        return False
-      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
-      active_nodes_count = len(active_nodes)
-
-      if active_nodes_count == 0:
-        print 'There are no active nodemanagers connected to the resourcemanager'
-        return False
-      else:
-        return True
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating ability response', e
-    return False
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
-  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
-
-  (options, args) = parser.parse_args()
-
-  component = args[0]
-  
-  address = options.address
-  ssl_enabled = str(options.ssl_enabled).lower() == 'true'
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/info'
-  elif component == NODEMANAGER:
-    path = '/ws/v1/node/info'
-  elif component == HISTORYSERVER:
-    path = '/ws/v1/history/info'
-  else:
-    parser.error("Invalid component")
-
-  validateAvailability(component, path, address, ssl_enabled)
-
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/nodes'
-    validateAbility(component, path, address, ssl_enabled)
-
-if __name__ == "__main__":
-  main()
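
Each availability check above keys off a single JSON field per component. Hedged fixtures for exercising validateAvailabilityResponse offline, assuming the function is importable; the field names come from the code above, the values are made up:

rm_ok = {'clusterInfo': {'state': 'STARTED'}}
nm_ok = {'nodeInfo': {'nodeHealthy': True}}
hs_ok = {'historyInfo': {'startedOn': 1391200000000}}  # any positive start time

assert validateAvailabilityResponse('rm', rm_ok)
assert validateAvailabilityResponse('nm', nm_ok)
assert validateAvailabilityResponse('hs', hs_ok)
assert not validateAvailabilityResponse('rm', {'clusterInfo': {'state': 'INITED'}})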

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py
deleted file mode 100644
index 9b6003c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-class Historyserver(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('historyserver',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('historyserver',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.historyserver_pid_file)
-
-if __name__ == "__main__":
-  Historyserver().execute()
\ No newline at end of file
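
check_process_status and the pid-file name come from resource_management and status_params.py (later in this message) and are not part of this diff; a rough, hypothetical sketch of what a pid-file status check of this kind does:

import os

class ComponentIsNotRunning(Exception):
    # stand-in for resource_management's exception of the same name
    pass

def check_process_status_sketch(pid_file):
    # Hypothetical: status() passes when the pid file exists and the
    # recorded process is still alive; otherwise the component is down.
    if not os.path.isfile(pid_file):
        raise ComponentIsNotRunning()
    with open(pid_file) as f:
        pid = int(f.read().strip())
    try:
        os.kill(pid, 0)  # signal 0 probes existence without killing
    except OSError:
        raise ComponentIsNotRunning()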

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py
deleted file mode 100644
index 3b789f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class MapReduce2ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
-    input_file = format("/user/{smokeuser}/mapredsmokeinput")
-    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
-
-    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
-    create_file_cmd = format("fs -put /etc/passwd {input_file}")
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-
-      Execute(kinit_cmd,
-              user=params.smokeuser
-      )
-
-    ExecuteHadoop(cleanup_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-    ExecuteHadoop(create_file_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir,
-                  logoutput=True
-    )
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-if __name__ == "__main__":
-  MapReduce2ServiceCheck().execute()
\ No newline at end of file
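
With illustrative values (the jar location and examples-jar pattern match the defaults in params.py later in this message; the smokeuser value is hypothetical), the format() templates above expand as follows:

# Illustrative expansion of the format() templates; smokeuser is hypothetical.
smokeuser = 'ambari-qa'
jar_path = '/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-2.*.jar'
input_file = '/user/{0}/mapredsmokeinput'.format(smokeuser)
output_file = '/user/{0}/mapredsmokeoutput'.format(smokeuser)

cleanup_cmd = 'fs -rm -r -f {0} {1}'.format(output_file, input_file)
create_file_cmd = 'fs -put /etc/passwd {0}'.format(input_file)
run_wordcount_job = 'jar {0} wordcount {1} {2}'.format(jar_path, input_file, output_file)
# Each string becomes the argument ExecuteHadoop hands to the hadoop CLI,
# e.g. 'hadoop fs -put /etc/passwd /user/ambari-qa/mapredsmokeinput'.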

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py
deleted file mode 100644
index 54119a7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-
-class MapReduce2Client(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  MapReduce2Client().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py
deleted file mode 100644
index dbeaca0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-class Nodemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('nodemanager',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('nodemanager',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nodemanager_pid_file)
-
-if __name__ == "__main__":
-  Nodemanager().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py
deleted file mode 100644
index bdcf88e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-config_dir = "/etc/hadoop/conf"
-
-mapred_user = status_params.mapred_user
-yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['global']['hdfs_user']
-
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-rm_host = config['clusterHostInfo']['rm_host'][0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = "8090"
-
-java64_home = config['hostLevelParams']['java_home']
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-
-hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-yarn_heapsize = config['configurations']['global']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']
-
-yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
-yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
-mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['global']['mapred_log_dir_prefix']
-
-rm_webui_address = format("{rm_host}:{rm_port}")
-rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
-hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
-
-nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
-nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
-
-
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-yarn_pid_dir = status_params.yarn_pid_dir
-mapred_pid_dir = status_params.mapred_pid_dir
-
-mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
-yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
-mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
-yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-
-user_group = config['configurations']['global']['user_group']
-limits_conf_dir = "/etc/security/limits.d"
-hadoop_conf_dir = "/etc/hadoop/conf"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-
-#exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
\ No newline at end of file
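
default() and functions.get_kinit_path come from resource_management and are not part of this diff; a hypothetical sketch of the fallback-and-probe behaviour the kinit_path_local line relies on:

import os

def default_sketch(value, fallback):
    # Hypothetical stand-in for resource_management's default():
    # keep the configured value when present, else use the fallback.
    return value if value is not None else fallback

def get_kinit_path_sketch(paths):
    # Hypothetical probe: the first directory containing a kinit binary wins.
    for p in paths:
        if p and os.path.isfile(os.path.join(p, 'kinit')):
            return os.path.join(p, 'kinit')
    return 'kinit'  # fall back to PATH resolution

kinit_dirs = [default_sketch(None, None), '/usr/bin', '/usr/kerberos/bin', '/usr/sbin']
print(get_kinit_path_sketch(kinit_dirs))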

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py
deleted file mode 100644
index 0540670..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-
-class Resourcemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    yarn()
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('resourcemanager',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.resourcemanager_pid_file)
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-
-    yarn_user = params.yarn_user
-    conf_dir = params.config_dir
-    user_group = params.user_group
-
-    yarn_refresh_cmd = format("/usr/bin/yarn --config {conf_dir} rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         group=user_group
-    )
-
-    Execute(yarn_refresh_cmd,
-            user=yarn_user
-    )
-
-
-if __name__ == "__main__":
-  Resourcemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py
deleted file mode 100644
index 441ef6c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-
-def service(name, action='start'):
-
-  import params
-
-  if name == 'historyserver':
-    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{name}.pid")
-    usr = params.mapred_user
-  else:
-    daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{name}.pid")
-    usr = params.yarn_user
-
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{cmd} start {name}")
-    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            user=usr,
-            not_if=no_op
-    )
-
-    Execute(no_op,
-            user=usr,
-            not_if=no_op,
-            initial_wait=5
-    )
-
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {name}")
-    Execute(daemon_cmd,
-            user=usr,
-    )
-    rm_pid = format("rm -f {pid_file}")
-    Execute(rm_pid,
-            user=usr
-    )
\ No newline at end of file
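
A trace of the start command service() assembles for a nodemanager; hadoop_libexec_dir, yarn_bin, and config_dir use the values from params.py above:

hadoop_libexec_dir = '/usr/lib/hadoop/libexec'  # params.py
yarn_bin = '/usr/lib/hadoop-yarn/sbin'          # params.py
config_dir = '/etc/hadoop/conf'                 # params.py

cmd = 'export HADOOP_LIBEXEC_DIR={0} && {1}/yarn-daemon.sh --config {2}'.format(
    hadoop_libexec_dir, yarn_bin, config_dir)
daemon_cmd = '{0} start nodemanager'.format(cmd)
# -> export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec &&
#    /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf start nodemanager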

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py
deleted file mode 100644
index c53cc78..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    run_yarn_check_cmd = "/usr/bin/yarn node -list"
-
-    component_type = 'rm'
-    if params.hadoop_ssl_enabled:
-      component_address = params.rm_webui_https_address
-    else:
-      component_address = params.rm_webui_address
-
-    validateStatusFileName = "validateYarnComponentStatus.py"
-    validateStatusFilePath = format("/tmp/{validateStatusFileName}")
-
-    validateStatusCmd = format("{validateStatusFilePath} {component_type} -p {component_address} -s {hadoop_ssl_enabled}")
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName),
-         mode=0755
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            user=params.smokeuser,
-            logoutput=True
-    )
-
-    Execute(run_yarn_check_cmd,
-                  user=params.smokeuser
-    )
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
\ No newline at end of file
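
When security is enabled, the kinit prefix and the validate command are concatenated into one shell string. An illustrative composition; the keytab, principal, and address values are hypothetical, and -s renders the Python boolean as 'False':

kinit_path_local = '/usr/bin/kinit'                                    # hypothetical
smoke_user_keytab = '/etc/security/keytabs/smokeuser.headless.keytab'  # hypothetical
smokeuser = 'ambari-qa'                                                # hypothetical
validateStatusCmd = ('/tmp/validateYarnComponentStatus.py rm '
                     '-p rm-host.example.com:8088 -s False')           # hypothetical address

kinit_cmd = '{0} -kt {1} {2};'.format(kinit_path_local, smoke_user_keytab, smokeuser)
smoke_cmd = '{0} {1}'.format(kinit_cmd, validateStatusCmd)
# -> /usr/bin/kinit -kt ... ambari-qa; /tmp/validateYarnComponentStatus.py rm -p ... -s False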

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py
deleted file mode 100644
index e554513..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-mapred_user = config['configurations']['global']['mapred_user']
-yarn_user = config['configurations']['global']['yarn_user']
-yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
-mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
-yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
\ No newline at end of file
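
Example expansion of the pid-file templates above; the prefix and user values are illustrative, not taken from this commit:

yarn_pid_dir_prefix, yarn_user = '/var/run/hadoop-yarn', 'yarn'  # illustrative
yarn_pid_dir = '{0}/{1}'.format(yarn_pid_dir_prefix, yarn_user)
resourcemanager_pid_file = '{0}/yarn-{1}-resourcemanager.pid'.format(yarn_pid_dir, yarn_user)
# -> /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid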

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py
deleted file mode 100644
index 986356e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-
-def yarn():
-  import params
-
-  Directory([params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  Directory([params.mapred_pid_dir, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  Directory([params.nm_local_dirs, params.nm_log_dirs, params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            recursive=True
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(params.yarn_job_summary_log,
-       owner=params.yarn_user,
-       group=params.user_group
-  )
-
-  File(params.mapred_job_summary_log,
-       owner=params.mapred_user,
-       group=params.user_group
-  )
-
-  File(format("{limits_conf_dir}/yarn.conf"),
-       mode=0644,
-       content=Template('yarn.conf.j2')
-  )
-
-  File(format("{limits_conf_dir}/mapreduce.conf"),
-       mode=0644,
-       content=Template('mapreduce.conf.j2')
-  )
-
-  File(format("{config_dir}/yarn-env.sh"),
-       owner=params.yarn_user,
-       group=params.user_group,
-       mode=0755,
-       content=Template('yarn-env.sh.j2')
-  )
-
-  if params.security_enabled:
-    container_executor = format("{yarn_container_bin}/container-executor")
-    File(container_executor,
-         group=params.yarn_executor_container_group,
-         mode=06050
-    )
-    
-    File(format("{config_dir}/container-executor.cfg"),
-         group=params.user_group,
-         mode=0644,
-         content=Template('container-executor.cfg.j2')
-    )
-
-
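
The 06050 mode on container-executor combines the setuid/setgid bits with group read/execute, which is what a setuid launcher owned by root with a restricted group conventionally needs; decoded below (0o6050 is the Python 3 spelling of the Python 2 literal 06050 used above):

import stat

mode = 0o6050                   # 06050 in the Python 2 source above
assert mode & stat.S_ISUID      # setuid: runs as the file owner
assert mode & stat.S_ISGID      # setgid: runs with the file's group
assert mode & stat.S_IRGRP      # group may read
assert mode & stat.S_IXGRP      # group may execute
assert not mode & stat.S_IRWXO  # no access for others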

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py
deleted file mode 100644
index 7e9c564..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-
-class YarnClient(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  YarnClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2
deleted file mode 100644
index 29ad949..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-yarn.nodemanager.local-dirs={{nm_local_dirs}}
-yarn.nodemanager.log-dirs={{nm_log_dirs}}
-yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
-banned.users = hdfs,yarn,mapred,bin
-min.user.id=1000
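
A sketch of how the template above renders; the real rendering goes through resource_management's Template resource, and the values here are hypothetical yarn-site settings:

from jinja2 import Template

src = (
    'yarn.nodemanager.local-dirs={{nm_local_dirs}}\n'
    'yarn.nodemanager.log-dirs={{nm_log_dirs}}\n'
    'yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}\n'
)
print(Template(src).render(
    nm_local_dirs='/hadoop/yarn/local',       # hypothetical
    nm_log_dirs='/hadoop/yarn/log',           # hypothetical
    yarn_executor_container_group='hadoop',   # hypothetical
))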

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index 4a4c698..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2
deleted file mode 100644
index 76caea4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{mapred_user}}   - nofile 32768
-{{mapred_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2
deleted file mode 100644
index 70bb71a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2
+++ /dev/null
@@ -1,119 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-export JAVA_HOME={{java64_home}}
-
-# User for YARN daemons
-export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-# resolve links - $0 may be a softlink
-export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-# some Java parameters
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m
-
-# To set YARN-specific heap sizes, use this parameter
-# and set it appropriately.
-YARN_HEAPSIZE={{yarn_heapsize}}
-
-# check envvars which might override default args
-if [ "$YARN_HEAPSIZE" != "" ]; then
-  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-fi
-
-# Resource Manager specific parameters
-
-# Specify the max Heapsize for the ResourceManager using a numerical value
-# in MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_RESOURCEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-# Specify the JVM options to be used when starting the ResourceManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_RESOURCEMANAGER_OPTS=
-
-# Node Manager specific parameters
-
-# Specify the max Heapsize for the NodeManager using a numerical value
-# in MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_NODEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-# Specify the JVM options to be used when starting the NodeManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_NODEMANAGER_OPTS=
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-
-# default log directory & file
-if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-fi
-if [ "$YARN_LOGFILE" = "" ]; then
-  YARN_LOGFILE='yarn.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$YARN_POLICYFILE" = "" ]; then
-  YARN_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi
-YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2
deleted file mode 100644
index be89b07..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{yarn_user}}   - nofile 32768
-{{yarn_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
deleted file mode 100644
index 8fb48b9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
+++ /dev/null
@@ -1,84 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>log4j.appender.CONSOLE</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.CONSOLE.Threshold</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.appender.CONSOLE.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.CONSOLE.layout.ConversionPattern</name>
-    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.Threshold</name>
-    <value>DEBUG</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.File</name>
-    <value>zookeeper.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.MaxFileSize</name>
-    <value>10MB</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.layout.ConversionPattern</name>
-    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE</name>
-    <value>org.apache.log4j.FileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.Threshold</name>
-    <value>TRACE</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.File</name>
-    <value>zookeeper_trace.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.layout.ConversionPattern</name>
-    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n</value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 3e048a6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.5.2.1.1</version>
-      <components>
-
-        <component>
-          <name>ZOOKEEPER_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/zookeeper_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZOOKEEPER_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/zookeeper_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>zookeeper</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>zookeeper-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
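The metainfo.xml removed above maps each component to the Python script that implements its lifecycle commands. A rough sketch of extracting that mapping with the standard library, assuming a file with the layout shown (the local path is an assumption):

# Sketch: list components and their command scripts from a metainfo.xml.
import xml.etree.ElementTree as ET

tree = ET.parse("metainfo.xml")  # assumed local path
for component in tree.getroot().iter("component"):
    print(component.findtext("name"), "->", component.findtext("commandScript/script"))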

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh
deleted file mode 100644
index 07017e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh
deleted file mode 100644
index 49ceb4d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this script is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   according to the docs they are not necessary, but otherwise jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac
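The status branch above pipes the four-letter "stat" command into nc on the client port and greps the reply for a Mode line. The same probe in Python, as a hedged sketch (host and port defaults are assumptions):

# Sketch: ZooKeeper liveness probe via the four-letter "stat" command.
import socket

def zk_mode(host="localhost", port=2181, timeout=5.0):
    with socket.create_connection((host, port), timeout=timeout) as sock:
        sock.sendall(b"stat")
        chunks = []
        while True:
            chunk = sock.recv(4096)
            if not chunk:
                break
            chunks.append(chunk)
    for line in b"".join(chunks).decode("utf-8", "replace").splitlines():
        if line.startswith("Mode:"):
            return line
    return None  # connected, but no Mode line came back

print(zk_mode() or "Error contacting service. It is probably not running.")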

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh
deleted file mode 100644
index 32dfce4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh
deleted file mode 100644
index c1c11b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smoke_script=$1
-smoke_user=$2
-conf_dir=$3
-client_port=$4
-security_enabled=$5
-kinit_path_local=$6
-smoke_user_keytab=$7
-export ZOOKEEPER_EXIT_CODE=0
-test_output_file=/tmp/zkSmoke.out
-errors_expr="ERROR|Exception"
-acceptable_expr="SecurityException"
-zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
-  su - $smoke_user -c "$kinitcmd"
-fi
-
-function verify_output() {
-  if [ -f $test_output_file ]; then
-    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
-    if [ "$?" -eq 0 ]; then
-      echo "Error found in the zookeeper smoke test. Exiting."
-      echo $errors
-      exit 1
-    fi
-  fi
-}
-
-# Delete /zk_smoketest znode if exists
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
-# Create /zk_smoketest znode on one zookeeper server
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
-verify_output
-
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
-  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE
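zkSmoke.sh derives the quorum host list by grepping the server.N lines out of zoo.cfg and cutting the host out of each host:port:port value. The same extraction in Python, assuming standard zoo.cfg syntax (the config path is an assumption):

# Sketch: mirror the grep/cut pipeline that builds $zkhosts above.
import re

def zk_hosts(conf_path="/etc/zookeeper/conf/zoo.cfg"):
    hosts = []
    with open(conf_path) as conf:
        for line in conf:
            match = re.match(r"server\.\d+\s*=\s*([^:]+)", line)
            if match:
                hosts.append(match.group(1).strip())
    return hosts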

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py
deleted file mode 100644
index 3c02248..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-config_dir = "/etc/zookeeper/conf"
-zk_user =  config['configurations']['global']['zk_user']
-hostname = config['hostname']
-zk_bin = '/usr/lib/zookeeper/bin'
-user_group = config['configurations']['global']['user_group']
-
-smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-
-zk_log_dir = config['configurations']['global']['zk_log_dir']
-zk_data_dir = config['configurations']['global']['zk_data_dir']
-zk_pid_dir = status_params.zk_pid_dir
-zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize = "-Xmx1024m"
-
-tickTime = config['configurations']['global']['tickTime']
-initLimit = config['configurations']['global']['initLimit']
-syncLimit = config['configurations']['global']['syncLimit']
-clientPort = config['configurations']['global']['clientPort']
-
-if 'zoo.cfg' in config['configurations']:
-  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
-else:
-  zoo_cfg_properties_map = {}
-zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
-
-zk_primary_name = "zookeeper"
-zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
-zk_principal = zk_principal_name.replace('_HOST',hostname)
-
-java64_home = config['hostLevelParams']['java_home']
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-
-keytab_path = "/etc/security/keytabs"
-zk_keytab_path = format("{keytab_path}/zk.service.keytab")
-zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
-zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-security_enabled = config['configurations']['global']['security_enabled']
-
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#log4j.properties
-if ('zookeeper-log4j' in config['configurations']):
-  log4j_props = config['configurations']['zookeeper-log4j']
-else:
-  log4j_props = None
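params.py leans on resource_management's format(), which interpolates {name} placeholders from the caller's scope rather than from explicit arguments. A rough stand-in built on str.format illustrates the effect; this is only an approximation of the real helper's behavior:

# Rough stand-in for resource_management's format(): resolve names
# from the calling frame, then apply explicit keyword overrides.
import inspect

def fmt(template, **overrides):
    caller = inspect.currentframe().f_back
    names = dict(caller.f_globals)
    names.update(caller.f_locals)
    names.update(overrides)
    return template.format(**names)

keytab_path = "/etc/security/keytabs"
print(fmt("{keytab_path}/zk.service.keytab"))  # /etc/security/keytabs/zk.service.keytab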

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py
deleted file mode 100644
index 6b3553d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ZookeeperServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File("/tmp/zkSmoke.sh",
-         mode=0755,
-         content=StaticFile('zkSmoke.sh')
-    )
-
+    cmd_quorum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
-                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
-                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
+    Execute(cmd_quorum,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-if __name__ == "__main__":
-  ZookeeperServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py
deleted file mode 100644
index 98f2903..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-zk_pid_dir = config['configurations']['global']['zk_pid_dir']
-zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py
deleted file mode 100644
index ac69829..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-
-def zookeeper(type = None):
-  import params
-
-  Directory(params.config_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  configFile("zoo.cfg", template_name="zoo.cfg.j2")
-  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
-  configFile("configuration.xsl", template_name="configuration.xsl.j2")
-
-  Directory(params.zk_pid_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_log_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_data_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  if type == 'server':
-    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
-
-    File(format("{zk_data_dir}/myid"),
-         mode = 0644,
-         content = myid
-    )
-
-  if (params.log4j_props != None):
-    PropertiesFile('log4j.properties',
-                   dir=params.config_dir,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.zk_user,
-                   group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.config_dir}/log4j.properties"))):
-    File(format("{params.config_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.zk_user
-    )
-
-  if params.security_enabled:
-    if type == "server":
-      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-    else:
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-
-  File(format("{config_dir}/zoo_sample.cfg"),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-def configFile(name, template_name=None):
-  import params
-
-  File(format("{config_dir}/{name}"),
-       content=Template(template_name),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-
-
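One detail worth noting in zookeeper.py above: a server's myid is its one-based position in the sorted quorum host list, so every host derives the same stable numbering with no coordination. For example (hostnames are illustrative):

# The myid rule from zookeeper.py, applied to illustrative hostnames.
zookeeper_hosts = ["c6403.example.com", "c6401.example.com", "c6402.example.com"]
hostname = "c6402.example.com"
myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
print(myid)  # "2"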

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
deleted file mode 100644
index 028a37d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-
-class ZookeeperClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    zookeeper(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
deleted file mode 100644
index e8cc264..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-from zookeeper_service import zookeeper_service
-
-class ZookeeperServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    zookeeper(type='server')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    zookeeper_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    zookeeper_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.zk_pid_file)
-
-if __name__ == "__main__":
-  ZookeeperServer().execute()
\ No newline at end of file
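zookeeper_server.py follows the same Script convention as the other files here: Ambari calls execute(), which dispatches the requested command name to the method of the same name (install, configure, start, stop, status). A stripped-down sketch of that dispatch, assuming the command arrives as the first CLI argument (an assumption — the real Script also parses a JSON command file):

# Stripped-down sketch of the Script dispatch convention.
import sys

class MiniScript(object):
    def execute(self):
        command = sys.argv[1] if len(sys.argv) > 1 else "status"
        getattr(self, command)()  # e.g. "start" -> self.start()

    def start(self):
        print("starting")

    def status(self):
        print("checking status")

if __name__ == "__main__":
    MiniScript().execute()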


[27/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000..3ad81a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='hiveserver2')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'hiveserver2',
+                  action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'hiveserver2',
+                  action = 'stop'
+    )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    # Verify the HiveServer2 pid file points at a live process
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
new file mode 100644
index 0000000..e8d4e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hive_service(
+    name,
+    action='start'):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format(
+      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format(
+      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd}")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            user=params.hive_user,
+            not_if=no_op_test
+    )
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
+
+  elif action == 'stop':
+    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
+    Execute(demon_cmd)
+
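The start path above makes repeated starts safe by guarding Execute with a not_if shell test: if the pid file exists and the process it names is alive, the daemon command is skipped. The same guard written directly in Python, as a sketch (the su invocation and error handling are simplified):

# Sketch: the not_if guard from hive_service, as a plain-Python idempotent start.
import os
import subprocess

def start_if_needed(cmd, pid_file, user):
    try:
        with open(pid_file) as handle:
            os.kill(int(handle.read().strip()), 0)  # signal 0: liveness check only
        return  # pid file present and process alive: nothing to do
    except (IOError, OSError, ValueError):
        pass  # missing or stale pid file: fall through and start
    subprocess.check_call(["su", "-", user, "-c", cmd])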

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000..8567311
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from mysql_service import mysql_service
+
+class MysqlServer(Script):
+
+  if System.get_instance().os_family == "suse":
+    daemon_name = 'mysql'
+  else:
+    daemon_name = 'mysqld'
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action='start')
+
+    File(params.mysql_adduser_path,
+         mode=0755,
+         content=StaticFile('addMysqlUser.sh')
+    )
+
+    # Pass the command as a tuple so each argument is quoted/escaped automatically
+    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
+           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd), params.mysql_host[0])
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+    mysql_service(daemon_name=self.daemon_name, action='stop')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'stop')
+
+  def status(self, env):
+    mysql_service(daemon_name=self.daemon_name, action = 'status')
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py
new file mode 100644
index 0000000..cfb3e08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'):
+  cmd = format('service {daemon_name} {action}')
+
+  if action == 'status':
+    logoutput = False
+  else:
+    logoutput = True
+
+  Execute(cmd,
+          path="/usr/local/bin/:/bin/:/sbin/",
+          tries=1,
+          logoutput=logoutput)
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000..734d3ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_server_conf_dir = "/etc/hive/conf.server"
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+#users
+hive_user = config['configurations']['global']['hive_user']
+hive_lib = '/usr/lib/hive/lib/'
+#JDBC driver jar name
+hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
+if hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc6.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+#common
+hive_metastore_port = config['configurations']['global']['hive_metastore_port']
+hive_var_lib = '/var/lib/hive'
+hive_server_host = config['clusterHostInfo']['hive_server_host']
+hive_url = format("jdbc:hive2://{hive_server_host}:10000")
+
+smokeuser = config['configurations']['global']['smokeuser']
+smoke_test_sql = "/tmp/hiveserver2.sql"
+smoke_test_path = "/tmp/hiveserver2Smoke.sh"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+#hive_env
+hive_conf_dir = "/etc/hive/conf"
+hive_dbroot = config['configurations']['global']['hive_dbroot']
+hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#hive-site
+hive_database_name = config['configurations']['global']['hive_database_name']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh'
+
+hadoop_home = '/usr'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user =  config['configurations']['global']['hdfs_user']
+user_group = config['configurations']['global']['user_group']
+artifact_dir = "/tmp/HDP-artifacts/"
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+
+start_hiveserver2_path = "/tmp/start_hiveserver2_script"
+start_metastore_path = "/tmp/start_metastore_script"
+
+hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+java64_home = config['hostLevelParams']['java_home']
+
+##### MYSQL
+
+db_name = config['configurations']['global']['hive_database_name']
+mysql_user = "mysql"
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = "/tmp/addMysqlUser.sh"
+
+########## HCAT
+
+hcat_conf_dir = '/etc/hcatalog/conf'
+
+metastore_port = 9933
+hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['global']['hcat_user']
+webhcat_user = config['configurations']['global']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
+
+hadoop_conf_dir = '/etc/hadoop/conf'
+
+#hive-log4j.properties.template
+if ('hive-log4j' in config['configurations']):
+  log4j_props = config['configurations']['hive-log4j']
+else:
+  log4j_props = None
+
+#hive-exec-log4j.properties.template
+if ('hive-exec-log4j' in config['configurations']):
+  log4j_exec_props = config['configurations']['hive-exec-log4j']
+else:
+  log4j_exec_props = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000..111e8a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+from hcat_service_check import hcat_service_check
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
+      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
+      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
+    else:
+      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
+
+    File(params.smoke_test_path,
+         content=StaticFile('hiveserver2Smoke.sh'),
+         mode=0755
+    )
+
+    File(params.smoke_test_sql,
+         content=StaticFile('hiveserver2.sql')
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True,
+            user=params.smokeuser)
+
+    hcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()
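
The smoke test above leans on Execute's retry knobs: tries=3 re-runs the command on a non-zero exit and try_sleep=5 pauses between attempts. A rough sketch of that retry shape, assuming a plain subprocess runner (the real Execute also handles user switching, PATH, and log capture):

  import subprocess
  import time

  def execute(cmd, tries=3, try_sleep=5):
    # Re-run on non-zero exit with a pause between attempts, raising once the
    # retry budget is spent: the shape of Execute(tries=..., try_sleep=...).
    for attempt in range(1, tries + 1):
      if subprocess.call(cmd, shell=True) == 0:
        return
      if attempt < tries:
        time.sleep(try_sleep)
    raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))

  if __name__ == "__main__":
    execute("true")  # harmless stand-in; the service check passes smoke_cmd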

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
new file mode 100644
index 0000000..7770975
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
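
status_params carries only what a status command needs: the pid directory plus the pid file names. A minimal sketch of the conventional pid-file liveness check those values feed into (the directory value here is an assumed example):

  import os

  hive_pid_dir = '/var/run/hive'  # assumed example; comes from the global config
  hive_pid = 'hive-server.pid'

  def is_process_up(pid_dir, pid_file):
    # True when the pid file exists and the recorded process answers signal 0.
    path = os.path.join(pid_dir, pid_file)
    try:
      with open(path) as f:
        pid = int(f.read().strip())
      os.kill(pid, 0)  # signal 0: existence/permission probe, sends nothing
      return True
    except (IOError, ValueError, OSError):
      return False

  print(is_process_up(hive_pid_dir, hive_pid))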

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hcat-env.sh.j2
new file mode 100644
index 0000000..2a35240
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hcat-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{hcat_user}}
+METASTORE_PORT={{metastore_port}}
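
Templates like hcat-env.sh.j2 are ordinary Jinja2: each {{name}} is resolved against the params module when the agent renders the file. A small sketch of that render step, assuming the jinja2 package and a hypothetical subset of params:

  from jinja2 import Template

  # Hypothetical subset of params; the agent supplies the whole module.
  params = {
    'java64_home': '/usr/jdk64/jdk1.7.0_45',
    'hcat_pid_dir': '/var/run/webhcat',
    'hcat_log_dir': '/var/log/webhcat',
  }

  template = Template('JAVA_HOME={{java64_home}}\nHCAT_PID_DIR={{hcat_pid_dir}}/')
  print(template.render(**params))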

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2
new file mode 100644
index 0000000..548262a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}
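
The two exported HADOOP_* lines above work as a pair: HADOOP_HEAPSIZE carries the configured megabyte count, and HADOOP_CLIENT_OPTS turns it into a -Xmx flag at shell-expansion time. A tiny sketch of the substitution, assuming hadoop_heapsize is 1024:

  hadoop_heapsize = '1024'  # assumed value of the global hadoop_heapsize config

  rendered = (
    'export HADOOP_HEAPSIZE="%s"\n'
    'export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"'
    % hadoop_heapsize
  )
  print(rendered)  # the shell later expands ${HADOOP_HEAPSIZE}, yielding -Xmx1024m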

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hfds,bin,0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/global.xml
deleted file mode 100644
index ceedd56..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/global.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>Mapreduce User</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
deleted file mode 100644
index 9389ed0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-    <description> Comma separated list of user and group names that are allowed
-      to submit jobs to the 'default' queue. The user list and the group list
-      are separated by a blank. For e.g. alice,bob group1,group2.
-      If set to the special value '*', it means all users are allowed to
-      submit jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-    <description> Comma separated list of user and group names that are allowed
-      to delete jobs or modify job's priority for jobs not owned by the current
-      user in the 'default' queue. The user list and the group list
-      are separated by a blank. For e.g. alice,bob group1,group2.
-      If set to the special value '*', it means all users are allowed to do
-      this operation.
-    </description>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
deleted file mode 100644
index 6691f8f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
+++ /dev/null
@@ -1,386 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-  </property>
-
-<!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory- relative to the maximum heap size- to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-    <description>
-    Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx312m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It will appears before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
-    <description>
-      User added environment variables for the MR App Master
-      processes. Example :
-      1) A=foo  This will set the env variable A to foo
-      2) B=$B:c This is inherit tasktracker's B env variable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for map tasks.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for reduce tasks.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is a
-      application-specific setting. It should not be larger than the global number
-      set by resourcemanager. Otherwise, it will be override. The default number is
-      set to 2, to allow at least one retry for AM.
-    </description>
-  </property>
-
-
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metainfo.xml
deleted file mode 100644
index ddaa979..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metainfo.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (client libraries)</comment>
-    <version>2.2.0.2.0.6.0</version>
-    <components>
-        <component>
-            <name>HISTORYSERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MAPREDUCE2_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>mapred-site</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metrics.json
deleted file mode 100644
index b611ed6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/metrics.json
+++ /dev/null
@@ -1,383 +0,0 @@
-{
-  "HISTORYSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.metrics.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.metrics.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 9c420bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Email address.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios process user.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Web user name.</description>
-  </property>
-  <property>
-    <name>nagios_web_password</name>
-    <value></value>
-    <description></description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
index 42bee82..a7396b9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
@@ -16,19 +16,98 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>NAGIOS</name>
+      <comment>Nagios Monitoring and Alerting system</comment>
+      <version>3.5.0</version>
+      <components>
         <component>
             <name>NAGIOS_SERVER</name>
             <category>MASTER</category>
+            <commandScript>
+              <script>scripts/nagios_server.py</script>
+              <scriptType>PYTHON</scriptType>
+              <timeout>600</timeout>
+            </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>perl</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>perl-Net-SNMP</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-plugins-1.4.9</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-www-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>fping</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hdp_mon_nagios_addons</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php5-json</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php-pecl-json.x86_64</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>redhat5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php-pecl-json.x86_64</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>oraclelinux5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php-pecl-json.x86_64</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
 </metainfo>
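
Under the schemaVersion 2.0 layout, each <commandScript> points at a Script subclass that the agent loads from the service package and drives by command name (install, configure, start, stop, status). A skeletal, runnable approximation of that dispatch contract; the base class here is a stand-in for resource_management's Script, and the method bodies are placeholders, not the real nagios_server.py:

  class Script(object):
    # Minimal stand-in for resource_management's Script dispatcher. The real
    # base class reads the command name and config paths from sys.argv.
    def execute(self, command):
      getattr(self, command)(env=None)

  class NagiosServer(Script):
    def install(self, env):
      print("install: pull the <packages> declared in metainfo.xml")
    def configure(self, env):
      print("configure: render nagios configs from params")
    def start(self, env):
      self.configure(env)
      print("start: launch the nagios daemon")
    def stop(self, env):
      print("stop: halt the nagios daemon")
    def status(self, env):
      print("status: check the nagios pid file")

  if __name__ == "__main__":
    NagiosServer().execute("start")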

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php
new file mode 100644
index 0000000..f4063fb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_aggregate.php
@@ -0,0 +1,243 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+    echo "Service description not provided -n option\n";
+    exit(3);
+  }
+  if ($type == "service") {
+    $service_name=$options['n'];
+    /* echo "DESC: " . $service_name . "\n"; */
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts;
+  if ($type == "service") {
+    $counts=query_alert_count($status_file_content, $service_name, $status_code);
+  } else {
+    $counts=query_host_count($status_file_content, $status_code);
+  }
+
+  if ($counts['total'] == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($counts['actual']/$counts['total'])*100;
+  }
+  if ($percent >= $crit) {
+    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (1);
+  }
+  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+  exit(0);
+
+
+  # Functions
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content, $status_code) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $total_hosts = 0;
+    $hosts = 0;
+    foreach ($matches[0] as $object) {
+      $total_hosts++;
+      if (getParameter($object, "current_state") == $status_code) {
+        $hosts++;
+      }
+    }
+    $hostcounts_object['total'] = $total_hosts;
+    $hostcounts_object['actual'] = $hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Alert counts */
+  function query_alert_count ($status_file_content, $service_name, $status_code) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $alertcounts_objects = array ();
+    $total_alerts=0;
+    $alerts=0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == $service_name) {
+        $total_alerts++;
+        if (getParameter($object, "current_state") >= $status_code) {
+          $alerts++;
+        }
+      }
+    }
+    $alertcounts_objects['total'] = $total_alerts;
+    $alertcounts_objects['actual'] = $alerts;
+    return $alertcounts_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break;
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break;
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break;
+      case "SYSTEM":
+      case "HDFS":
+      case "MAPREDUCE":
+      case "HBASE":
+        break;
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+
+/* JSON document format */
+/*
+{
+  "programstatus":{
+    "last_command_check":"1327385743"
+  },
+  "hostcounts":{
+    "up_nodes":"",
+    "down_nodes":""
+  },
+  "hoststatus":[
+    {
+      "host_name":"ip-10-242-191-48.ec2.internal",
+      "current_state":"0",
+      "last_hard_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_check":"1327385564",
+      "current_attempt":"1",
+      "last_hard_state_change":"1327362079",
+      "last_time_up":"1327385574",
+      "last_time_down":"0",
+      "last_time_unreachable":"0",
+      "is_flapping":"0",
+      "servicestatus":[
+      ]
+    }
+  ],
+  "servicestatus":[
+    {
+      "service_type":"HDFS",   (one of HDFS, HBASE, MAPREDUCE, HIVE, ZOOKEEPER)
+      "service_description":"HDFS Current Load",
+      "host_name":"ip-10-242-191-48.ec2.internal",
+      "current_attempt":"1",
+      "current_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_hard_state_change":"1327362079",
+      "last_time_ok":"1327385479",
+      "last_time_warning":"0",
+      "last_time_unknown":"0",
+      "last_time_critical":"0",
+      "last_check":"1327385574",
+      "is_flapping":"0"
+    }
+  ]
+}
+*/
+
+?>

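For reference, the alert-count plugin above is plain regex work over the Nagios
status file: hoststatus/servicestatus blocks are matched with patterns like
/hoststatus \{([\S\s]*?)\}/ and individual fields are pulled out by
getParameter(). A minimal Python sketch of the same parsing approach
(hypothetical and illustrative only, not part of this commit):

import re

def get_parameter(block, key):
    # Mirrors the PHP getParameter(): pull "key=value" out of one status block.
    match = re.search(r"\s" + re.escape(key) + r"[\s=]*([\S, ]*)\n", block)
    return match.group(1) if match else ""

def query_host_count(status_file_content, status_code):
    # Mirrors query_host_count(): count all hosts, and hosts in the given state.
    counts = {"total": 0, "actual": 0}
    for block in re.findall(r"hoststatus \{([\S\s]*?)\}", status_file_content):
        counts["total"] += 1
        if get_parameter(block, "current_state") == status_code:
            counts["actual"] += 1
    return counts

if __name__ == "__main__":
    sample = "hoststatus {\n\thost_name=dn1\n\tcurrent_state=0\n\t}\n"
    print(query_host_count(sample, "0"))   # {'total': 1, 'actual': 1}
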
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl
new file mode 100644
index 0000000..a5680f7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_cpu.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl -w 
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+use strict;
+use Net::SNMP;
+use Getopt::Long;
+
+# Variable
+my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
+my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
+my $o_host = 	undef;
+my $o_community = undef;
+my $o_warn=	undef;
+my $o_crit=	undef;
+my $o_timeout = 15;
+my $o_port = 161;
+
+sub Usage {
+    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
+}
+
+Getopt::Long::Configure ("bundling");
+GetOptions(
+  'H:s'   => \$o_host,	
+  'C:s'   => \$o_community,	
+  'c:s'   => \$o_crit,        
+  'w:s'   => \$o_warn
+          );
+if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
+  Usage();
+  exit 3;
+}
+$o_warn =~ s/\%//g; 
+$o_crit =~ s/\%//g;
+alarm ($o_timeout);
+$SIG{'ALRM'} = sub {
+ print "Unable to contact host: $o_host\n";
+ exit 3;
+};
+
+# Connect to host
+my ($session,$error);
+($session, $error) = Net::SNMP->session(
+		-hostname  => $o_host,
+		-community => $o_community,
+		-port      => $o_port,
+		-timeout   => $o_timeout
+	  );
+if (!defined($session)) {
+   printf("Error opening session: %s.\n", $error);
+   exit 3;
+}
+
+my $exit_val=undef;
+my $resultat =  (Net::SNMP->VERSION < 4) ?
+	  $session->get_table($base_proc)
+	: $session->get_table(Baseoid => $base_proc);
+
+if (!defined($resultat)) {
+   printf("ERROR: Description table : %s.\n", $session->error);
+   $session->close;
+   exit 3;
+}
+
+$session->close;
+
+my ($cpu_used,$ncpu)=(0,0);
+foreach my $key ( keys %$resultat) {
+  if ($key =~ /$proc_load/) {
+    $cpu_used += $$resultat{$key};
+    $ncpu++;
+  }
+}
+
+if ($ncpu==0) {
+  print "Can't find CPU usage information : UNKNOWN\n";
+  exit 3;
+}
+
+$cpu_used /= $ncpu;
+
+print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
+printf(" %.1f%%",$cpu_used);
+$exit_val=0;
+
+if ($cpu_used > $o_crit) {
+ print " > $o_crit% : CRITICAL\n";
+ $exit_val=2;
+} else {
+  if ($cpu_used > $o_warn) {
+   print " > $o_warn% : WARNING\n";
+   $exit_val=1;
+  }
+}
+print " < $o_warn% : OK\n" if ($exit_val == 0);
+exit $exit_val;

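check_cpu.pl walks the HOST-RESOURCES-MIB processor-load table
(1.3.6.1.2.1.25.3.3.1.2), averages the per-CPU values, and maps the average
onto Nagios exit codes (0/1/2/3). A sketch of just that averaging and
threshold logic in Python, with the SNMP walk stubbed out (hypothetical; it
assumes the per-CPU load percentages were already fetched):

import sys

def cpu_status(per_cpu_loads, warn, crit):
    # per_cpu_loads: one hrProcessorLoad percentage per CPU, as obtained
    # by walking 1.3.6.1.2.1.25.3.3.1.2 over SNMP (not shown here).
    if not per_cpu_loads:
        return "Can't find CPU usage information : UNKNOWN", 3
    ncpu = len(per_cpu_loads)
    avg = float(sum(per_cpu_loads)) / ncpu
    if avg > crit:
        return "%d CPU, load %.1f%% > %s%% : CRITICAL" % (ncpu, avg, crit), 2
    if avg > warn:
        return "%d CPU, load %.1f%% > %s%% : WARNING" % (ncpu, avg, warn), 1
    return "%d CPU, load %.1f%% < %s%% : OK" % (ncpu, avg, warn), 0

if __name__ == "__main__":
    msg, code = cpu_status([93, 95], warn=90, crit=95)
    print(msg)      # 2 CPU, load 94.0% > 90% : WARNING
    sys.exit(code)
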
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php
new file mode 100644
index 0000000..dee22b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_datanode_storage.php
@@ -0,0 +1,100 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the datanode, gets its jmx JSON document, and
+ * checks the storage capacity remaining in local datanode storage.
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  }
+  $cap_remain = $object['Remaining']; /* remaining capacity, excluding external files created in data directories by non-hadoop apps */
+  $cap_total = $object['Capacity']; /* capacity of all data partitions minus space reserved for M/R */
+  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
+
+  $out_msg = "Capacity:[" . $cap_total . 
+             "], Remaining Capacity:[" . $cap_remain . 
+             "], percent_full:[" . $percent_full  . "]";
+  
+  if ($percent_full > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent_full > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

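The datanode check boils down to
percent_full = (Capacity - Remaining) / Capacity * 100 over the
FSDatasetState bean, compared against the -w/-c thresholds. A sketch of the
same computation in Python over an already-fetched /jmx response
(hypothetical, not part of this commit):

import json

def datanode_storage_status(jmx_json, warn, crit):
    # jmx_json: body of /jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*
    bean = json.loads(jmx_json)["beans"][0]
    cap_total = bean["Capacity"]
    cap_remain = bean["Remaining"]
    percent_full = (cap_total - cap_remain) / float(cap_total) * 100
    msg = "Capacity:[%s], Remaining Capacity:[%s], percent_full:[%s]" % (
        cap_total, cap_remain, percent_full)
    if percent_full > crit:
        return "CRITICAL: " + msg, 2
    if percent_full > warn:
        return "WARNING: " + msg, 1
    return "OK: " + msg, 0

if __name__ == "__main__":
    sample = '{"beans":[{"Capacity":1000,"Remaining":50}]}'
    print(datanode_storage_status(sample, warn=75, crit=90)[0])
    # CRITICAL: Capacity:[1000], Remaining Capacity:[50], percent_full:[95.0]
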
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php
new file mode 100644
index 0000000..19347b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_blocks.php
@@ -0,0 +1,115 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the master node, gets the jmx JSON document, and
+ * checks whether the corrupt or missing block % is above the threshold, e.g.:
+ * check_jmx -H hostaddress -p port -w 1% -c 1%
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $nn_jmx_property=$options['s'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['u'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $m_percent = 0;
+    $c_percent = 0;
+    $object = $json_array['beans'][0];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }
+    $missing_blocks = $object['MissingBlocks'];
+    $corrupt_blocks = $object['CorruptBlocks'];
+    $total_blocks = $object['BlocksTotal'];
+    if($total_blocks == 0) {
+      $m_percent = 0;
+      $c_percent = 0;
+    } else {
+      $m_percent = ($missing_blocks/$total_blocks)*100;
+      $c_percent = ($corrupt_blocks/$total_blocks)*100;
+      break;
+    }
+  }
+  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
+             ">, missing_blocks:<" . $missing_blocks .
+             ">, total_blocks:<" . $total_blocks . ">";
+
+  if ($m_percent > $crit || $c_percent > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($m_percent > $warn || $c_percent > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -u security enabled -e ssl enabled\n";
+  }
+?>

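check_hdfs_blocks.php walks a comma-separated NameNode list and reads
MissingBlocks, CorruptBlocks and BlocksTotal from
/jmx?qry=Hadoop:service=NameNode,name=<bean>, skipping hosts that report zero
total blocks. A sketch of that fetch-and-compute loop in Python (hypothetical;
it omits the kerberos/SPNEGO and SSL handling the PHP plugin performs):

import json
try:
    from urllib.request import urlopen   # python 3
except ImportError:
    from urllib2 import urlopen          # python 2

def block_percentages(hosts, port, nn_jmx_property):
    # hosts: comma-separated NameNode list, as in the plugin's -h option.
    for host in hosts.split(","):
        url = "http://%s:%s/jmx?qry=Hadoop:service=NameNode,name=%s" % (
            host, port, nn_jmx_property)
        bean = json.loads(urlopen(url).read().decode("utf-8"))["beans"][0]
        total = bean["BlocksTotal"]
        if total == 0:
            continue   # try the next NameNode, as the PHP loop does
        return (bean["MissingBlocks"] * 100.0 / total,
                bean["CorruptBlocks"] * 100.0 / total)
    return (0.0, 0.0)
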

[36/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index bb54a93..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,623 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of Map-side buffer memory to use while sorting files
-    </description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>The percentage of io.sort.mb dedicated to tracking record boundaries. Let this value be r, io.sort.mb be x.
-      The maximum number of records collected before the collection thread must block is equal to (r * x) / 4
-    </description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>The number of streams to merge at once while sorting files. This determines the number of open file handles.
-    </description>
-  </property>
-
-  <!-- map/reduce properties -->
-
-  <property>
-    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-    <value>250</value>
-    <description>Normally, this is the amount of time before killing
-      processes, and the recommended-default is 5.000 seconds - a value of
-      5000 here.  In this case, we are using it solely to blast tasks before
-      killing them, and killing them very quickly (1/4 second) to guarantee
-      that we do not leave VMs around for later jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-      The number of server threads for the JobTracker. This should be roughly
-      4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>/mapred/system</value>
-    <description>Path on the HDFS where where the MapReduce framework stores system files</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value>localhost:50300</value>
-    <description>JobTracker address</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:50030</value>
-    <description>JobTracker host and http port address</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value>/hadoop/mapred</value>
-    <description>The local directory where MapReduce stores intermediate data files. May be a comma-separated list of
-      directories on different devices in order to spread disk i/o. Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>Cluster administrators. Irrespective of the job ACLs configured, cluster administrators always have
-      access to view and modify a job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>The default number of parallel transfers run by reduce
-      during the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value>4</value>
-    <description>The maximum number of map tasks that will be run simultaneously by a task tracker.</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value>2</value>
-    <description>The maximum number of reduce tasks that will be run simultaneously by a task tracker.</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-    <description>The number of worker threads that for the http server. This is used for map output fetching.</description>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-    <description>Fraction of the number of maps in the job which should be complete before reduces are scheduled for the job.</description>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files
-      for the in-memory merge process. When we accumulate threshold number of files
-      we initiate the in-memory merge and spill to disk. A value of 0 or less than
-      0 indicates we want to DON'T have any threshold and instead depend only on
-      the ramfs's memory consumption to trigger the merge.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapred.job.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.output.compression.type</name>
-    <value>BLOCK</value>
-    <description>If the job outputs are to compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>0</value>
-    <description>The maximum number of complete jobs per user to keep around before delegating them to the job history.</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-    <description>The class responsible for scheduling the tasks.</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-      "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory- relative to the maximum heap size- to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.limit</name>
-    <value>10737418240</value>
-    <description>The limit on the input size of the reduce. (This value
-      is 10 Gb.)  If the estimated input size of the reduce is greater than
-      this value, job is failed. A value of -1 means that there is no limit
-      set. </description>
-  </property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>The connector to be used by Jetty server.</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>
-      TaskController which is used to launch and manage task execution.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-    <description>Logger configuration for the TaskTracker child processes</description>
-  </property>
-
-  <property>
-    <name>ambari.mapred.child.java.opts.memory</name>
-    <value>768</value>
-    <description>Java options Memory for the TaskTracker child processes</description>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
-    <description>Java options for the TaskTracker child processes</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      The virtual memory size of a single Map slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      The virtual memory size of a single Reduce slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      Virtual memory for single Map task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      Virtual memory for single Reduce task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value>6144</value>
-    <description>
-      Upper limit on virtual memory size for a single Map task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value>4096</value>
-    <description>
-      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts</name>
-    <value>/etc/hadoop/conf/mapred.include</value>
-    <description>
-      Names a file that contains the list of nodes that may
-      connect to the jobtracker.  If the value is empty, all hosts are
-      permitted.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts.exclude</name>
-    <value>/etc/hadoop/conf/mapred.exclude</value>
-    <description>
-      Names a file that contains the list of hosts that
-      should be excluded by the jobtracker.  If the value is empty, no
-      hosts are excluded.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.max.tracker.blacklists</name>
-    <value>16</value>
-    <description>
-      if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>/etc/hadoop/conf/health_check</value>
-    <description>
-      Directory path to view job status
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.interval</name>
-    <value>135000</value>
-    <description>Frequency of the node health script to be run, in milliseconds</description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.timeout</name>
-    <value>60000</value>
-    <description>Time after node health script should be killed if unresponsive and considered that the script has failed.</description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
-    <value>false</value>
-    <description>Indicates if persistency of job status information is
-      active or not.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.hours</name>
-    <value>1</value>
-    <description>The number of hours job status information is persisted in DFS.
-      The job status information will be available after it drops of the memory
-      queue and between jobtracker restarts. With a zero value the job status
-      information is not persisted at all in DFS.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
-    <value>/mapred/jobstatus</value>
-    <description>The directory where the job status information is persisted
-      in a file system to be available after it drops of the memory queue and
-      between jobtracker restarts.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.check</name>
-    <value>10000</value>
-    <description>Interval for the check for jobs to be retired. </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.interval</name>
-    <value>21600000</value>
-    <description> Completed Job retirement interval. </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>/mapred/history/done</value>
-    <description>The completed job history files are stored at this single well known location.</description>
-  </property>
-
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.maxtasks.per.job</name>
-    <value>-1</value>
-    <final>true</final>
-    <description>The maximum number of tasks for a single job.
-      A value of -1 indicates that there is no maximum.  </description>
-  </property>
-
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>false</value>
-    <description>Enable this flag to create _SUCCESS file for successful job. </description>
-  </property>
-
-  <property>
-    <name>mapred.userlog.retain.hours</name>
-    <value>24</value>
-    <description>
-      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reuse.jvm.num.tasks</name>
-    <value>1</value>
-    <description>
-      How many tasks to run per jvm. If set to -1, there is no limit
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      JT user name key.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      tt user name key. "_HOST" is replaced by the host name of the task tracker.
-    </description>
-  </property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <description> Location to store the history files of a particular job. If set to none, then the job histories are
-      not collected to anywhere outside the master node.
-    </description>
-    <final>true</final>
-  </property>
-
-
-  <property>
-    <name>mapreduce.jobtracker.keytab.file</name>
-    <value></value>
-    <description>
-      The keytab for the jobtracker principal.
-    </description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
-    <description>The filename of the keytab for the task tracker</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.http.address</name>
-    <value></value>
-    <description>Http address for task tracker.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>/user</value>
-    <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-      name. It is a path in the default file system.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.group</name>
-    <value>hadoop</value>
-    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-      initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should job history server be embedded within Job tracker
-      process</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:51111</value>
-    <description>Http address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Job history user name key. (must map to same user as JT
-      user)</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-    <value>180</value>
-    <description>
-      3-hour sliding window (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-    <value>15</value>
-    <description>
-      15-minute bucket size (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.names</name>
-    <value>default</value>
-    <description> Comma separated list of queues configured for this jobtracker.</description>
-  </property>
-  
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-</configuration>

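The deleted mapred-site.xml follows the standard Hadoop configuration layout:
repeated <property> elements carrying <name>/<value> pairs plus optional
<description> and <final> flags (note how mapred.child.java.opts interpolates
${ambari.mapred.child.java.opts.memory}). The python stack scripts in this
commit do not parse these XML files themselves; they receive configurations as
JSON via Script.get_config(), as params.py below shows. For illustration only,
though, reading such a file into a dict takes a few lines:

import xml.etree.ElementTree as ET

def parse_hadoop_site(path):
    # Illustrative only: collect <name>/<value> pairs from a *-site.xml.
    props = {}
    for prop in ET.parse(path).getroot().findall("property"):
        props[prop.findtext("name")] = prop.findtext("value") or ""
    return props

# e.g. parse_hadoop_site("mapred-site.xml")["mapred.job.tracker"]
# would yield "localhost:50300" for the file above.
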
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml
deleted file mode 100644
index 09c9c4f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hadoop.mapreduce.jobsummary.logger</name>
-    <value>${hadoop.root.logger}</value>
-  </property>
-
-  <property>
-    <name>hadoop.mapreduce.jobsummary.log.file</name>
-    <value>hadoop-mapreduce.jobsummary.log</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.JSA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.JSA.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.JSA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.JSA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-
-  <!--Only for HDP1.0 below-->
-  <property>
-    <name>log4j.appender.JSA.File</name>
-    <value>/var/log/hadoop/mapred/${hadoop.mapreduce.jobsummary.log.file}</value>
-  </property>
-
-  <property>
-    <name>log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary</name>
-    <value>${hadoop.mapreduce.jobsummary.logger}</value>
-  </property>
-
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary</name>
-    <value>false</value>
-  </property>
-
-  <!--Only for HDP1.0 above-->
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 643b64c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>MAPREDUCE</name>
-      <comment>Apache Hadoop Distributed Processing Framework</comment>
-      <version>1.2.0.1.3.3.0</version>
-      <components>
-        <component>
-          <name>JOBTRACKER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/jobtracker.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/jobtracker.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>TASKTRACKER</name>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/tasktracker.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MAPREDUCE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality> 
-          <commandScript>
-            <script>scripts/client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-              
-        <component>
-          <name>HISTORYSERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>MAPREDUCE/JOBTRACKER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>capacity-scheduler</config-type>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-queue-acls</config-type>
-        <config-type>mapreduce-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-  </services>
-</metainfo>

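The metainfo.xml above (schemaVersion 2.0) declares, per component, its
category (MASTER/SLAVE/CLIENT), cardinality, and the python command script
that now replaces the puppet manifests. A sketch that lists those declarations
with ElementTree (hypothetical helper, assuming the layout shown above):

import xml.etree.ElementTree as ET

def list_components(metainfo_path):
    # Illustrative only: print each declared component and its command script.
    root = ET.parse(metainfo_path).getroot()
    for component in root.findall(".//component"):
        print("%s (%s): %s" % (component.findtext("name"),
                               component.findtext("category"),
                               component.findtext("commandScript/script")))

# For the MAPREDUCE metainfo above this would print lines like:
#   JOBTRACKER (MASTER): scripts/jobtracker.py
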
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
deleted file mode 100644
index 79c644d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Client(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  Client().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
deleted file mode 100644
index 972a767..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Historyserver(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    service('historyserver',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('historyserver',
-            action='stop'
-    )
-
-  def status(self, env):
-     import status_params
-     env.set_params(status_params)
-     check_process_status(status_params.historyserver_pid_file)
-
-if __name__ == "__main__":
-  Historyserver().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
deleted file mode 100644
index 5cd41ae..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Jobtracker(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('jobtracker',
-            action='start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('jobtracker',
-            action='stop'
-    )
-    
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.jobtracker_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-
-    mapred_user = params.mapred_user
-    conf_dir = params.conf_dir
-    user_group = params.user_group
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=mapred_user,
-         group=user_group
-    )
-
-    ExecuteHadoop('mradmin -refreshNodes',
-                user=mapred_user,
-                conf_dir=conf_dir,
-                kinit_override=True)
-    pass
-
-if __name__ == "__main__":
-  Jobtracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
deleted file mode 100644
index c5fd002..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-
-def mapreduce():
-  import params
-
-  Directory([params.mapred_pid_dir,params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  Directory(params.mapred_local_dir,
-            owner=params.mapred_user,
-            mode=0755,
-            recursive=True
-  )
-
-  File(params.exclude_file_path,
-            owner=params.mapred_user,
-            group=params.user_group,
-  )
-
-  File(params.mapred_hosts_file_path,
-            owner=params.mapred_user,
-            group=params.user_group,
-  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
deleted file mode 100644
index d722124..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/hadoop/conf"
-
-mapred_user = status_params.mapred_user
-pid_dir_prefix = status_params.pid_dir_prefix
-mapred_pid_dir = status_params.mapred_pid_dir
-
-historyserver_pid_file = status_params.historyserver_pid_file
-jobtracker_pid_file = status_params.jobtracker_pid_file
-tasktracker_pid_file = status_params.tasktracker_pid_file
-
-hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
-hadoop_bin = "/usr/lib/hadoop/bin"
-user_group = config['configurations']['global']['user_group']
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-mapred_log_dir = format("{hdfs_log_dir_prefix}/{mapred_user}")
-mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
-
-hadoop_jar_location = "/usr/lib/hadoop/"
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#exclude file
-mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
-exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
-mapred_hosts_file_path = config['configurations']['mapred-site']['mapred.hosts']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
deleted file mode 100644
index f4aa91b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-
-def service(
-    name,
-    action='start'):
-
-  import params
-
-  pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-{name}.pid")
-  hadoop_daemon = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {conf_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{cmd} start {name}")
-    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            user=params.mapred_user,
-            not_if=no_op
-    )
-
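-    # after a short initial wait, verify the daemon stayed up (not_if skips the check when the pid is already live)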
-    Execute(no_op,
-            user=params.mapred_user,
-            not_if=no_op,
-            initial_wait=5
-    )
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {name}")
-    rm_pid =  format("rm -f {pid_file}")
-
-    Execute(daemon_cmd,
-            user=params.mapred_user
-    )
-    Execute(rm_pid)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
deleted file mode 100644
index c0a4a59..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_location = params.hadoop_jar_location
-    input_file = 'mapredsmokeinput'
-    output_file = "mapredsmokeoutput"
-
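-    # smoke test flow: clean up old artifacts, upload /etc/passwd as input, run the wordcount example, then test that the output dir exists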
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    create_file_cmd = format("{cleanup_cmd} ; hadoop dfs -put /etc/passwd {input_file}")
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_location}/hadoop-examples.jar wordcount {input_file} {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-
-      Execute(kinit_cmd,
-              user=params.smokeuser
-      )
-
-    ExecuteHadoop(create_file_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.conf_dir
-    )
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.conf_dir,
-                  logoutput=True
-    )
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  conf_dir=params.conf_dir
-    )
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
deleted file mode 100644
index f964a76..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-mapred_user = config['configurations']['global']['mapred_user']
-pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}")
-
-jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid")
-tasktracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-tasktracker.pid")
-historyserver_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-historyserver.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
deleted file mode 100644
index 77d974b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Tasktracker(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('tasktracker',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('tasktracker',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.tasktracker_pid_file)
-
-if __name__ == "__main__":
-  Tasktracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index 02fc5fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in mr_exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 61a2b90..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property>
-    <name>nagios_web_password</name>
-    <value></value>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property>
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
deleted file mode 100644
index a4c500d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-            <cardinality>1</cardinality>
-            <commandScript>
-              <script>scripts/nagios_server.py</script>
-              <scriptType>PYTHON</scriptType>
-              <timeout>600</timeout>
-            </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>perl</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>fping</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <package>
-            <type>rpm</type>
-            <name>php5-json</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <package>
-            <type>rpm</type>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>redhat5</osType>
-          <package>
-            <type>rpm</type>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>oraclelinux5</osType>
-          <package>
-            <type>rpm</type>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index f4063fb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,243 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
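-# sum the per-processor load values (hrProcessorLoad) and count the CPUs to compute an average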
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the DataNode, gets the JMX JSON document,
- * and checks the storage capacity remaining on local DataNode storage.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop apps */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }  
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 19347b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the NameNode, gets the JMX JSON document,
- * and checks whether the corrupt or missing block % is > threshold.
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
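-  /* iterate over the (possibly comma-separated) NameNode hosts; the loop breaks at the first host reporting a non-zero block total */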
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $c_percent = 0;
-    $object = $json_array['beans'][0];
-    $missing_blocks = $object['MissingBlocks'];
-    $corrupt_blocks = $object['CorruptBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    if($total_blocks == 0) {
-      $m_percent = 0;
-      $c_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      $c_percent = ($corrupt_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
-             ">, missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the NameNode, gets the JMX JSON document,
- * and checks whether the % of HDFS capacity used is >= the warning and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
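-  /* iterate over the (possibly comma-separated) NameNode hosts; the loop breaks at the first host reporting non-zero total capacity */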
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# The URI is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;


[48/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..2d91e75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='regionserver')
+      
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
+    check_process_status(pid_file)
+    
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+    
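+# standalone debug entry point with hardcoded workspace paths; not used when the
+# script is run by the Ambari agent (__main__ below calls execute() directly)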
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseRegionServer().execute()
+  
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..7a1248b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {conf_dir}")
+    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
+    
+    daemon_cmd = None
+    no_op_test = None
+    
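+    # start is guarded by a pid-file liveness check so repeated starts are no-ops; stop also removes the pid file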
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
+
+    if daemon_cmd is not None:
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..1d57fc9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from functions import calc_xmn_from_xms
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hbase/conf"
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/region_drainer.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+hbase_excluded_hosts = default("/commandParams/excluded_hosts", "")
+hbase_drain_only = default("/commandParams/mark_draining_only", "")
+
+hbase_user = config['configurations']['global']['hbase_user']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+user_group = config['configurations']['global']['user_group']
+
+# this is "hadoop-metrics2-hbase.properties" for 2.x stacks
+metric_prop_file_name = "hadoop-metrics.properties" 
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['global']['hbase_log_dir']
+master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
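+# new-generation size for the regionserver JVM: 20% of the heap, presumably capped at 512 MB by calc_xmn_from_xms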
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
+master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
+regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # not passed when Ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) # if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
+
+smoke_test_user = config['configurations']['global']['smokeuser']
+smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+service_check_data = get_unique_id_and_date()
+
+if security_enabled:
+  
+  _use_hostname_in_principal = default('instance_name', True)
+  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
+  _hostname = config['hostname']
+  _kerberos_domain = config['configurations']['global']['kerberos_domain']
+  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
+  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
+  
+  if _use_hostname_in_principal:
+    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
+  else:
+    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
+    
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#log4j.properties
+if ('hbase-log4j' in config['configurations']):
+  log4j_props = config['configurations']['hbase-log4j']
+else:
+  log4j_props = None

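calc_xmn_from_xms itself lives in functions.py, which is not part of this hunk. A
minimal sketch of what it presumably computes, assuming heap sizes arrive as whole
megabytes (optionally suffixed with "m") and the result is truncated to an integer:

    def calc_xmn_from_xms(heapsize, xmn_percent, xmn_max):
        # e.g. "1024m" -> 1024; the suffix handling here is an assumption
        heapsize_mb = int(str(heapsize).lower().rstrip('m'))
        # young generation = xmn_percent of the heap, capped at xmn_max MB
        xmn_mb = min(int(heapsize_mb * xmn_percent), xmn_max)
        return str(xmn_mb) + 'm'

With the (0.2, 512) arguments used above, a 1024m region server heap would yield an
Xmn of 204m.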
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..ff6d0ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
+    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
+  
+    File( '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:
+      hbase_grant_permissions_file = '/tmp/hbase_grant_permissions.sh'
+      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+      grant_privileges_cmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_permissions_file}")
+
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+
+      Execute( grant_privileges_cmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()

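A note on the format() calls above: resource_management's format resolves
placeholder names from the calling scope (locals plus the params module), so no
explicit mapping is passed. A rough standard-library analogue, with hypothetical
values, for illustration only:

    def format_from_scope(template, scope):
        # stand-in for resource_management's scope-aware format()
        return template.format(**scope)

    scope = {"kinit_path_local": "/usr/bin/kinit",
             "smoke_user_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
             "smoke_test_user": "ambari-qa"}
    print(format_from_scope(
        "{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};", scope))
    # -> /usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;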
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
new file mode 100644
index 0000000..c9b20ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..1c75d15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..e971e13
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8656

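This template is identical to the GANGLIA-MASTER one above except for the Ganglia
port: region servers report to 8656, while the master-side contexts use 8663. A
minimal rendering check with plain Jinja2 (assumption: vanilla Jinja2 behaves like
Ambari's Template resource for a simple substitution; the host name is hypothetical):

    from jinja2 import Template

    line = "hbase.servers={{ganglia_server_host}}:8656"
    print(Template(line).render(ganglia_server_host="ganglia.example.com"))
    # -> hbase.servers=ganglia.example.com:8656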
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties.j2
new file mode 100644
index 0000000..1c75d15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hadoop-metrics.properties.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2
new file mode 100644
index 0000000..b8505b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2
@@ -0,0 +1,82 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% endif %}

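As a worked example of the heap lines above: with hbase_regionserver_heapsize set to
1024m and the (0.2, 512) arguments params.py passes to calc_xmn_from_xms, the
rendered region server line would be roughly (assuming the helper truncates to
whole megabytes):

    export HBASE_REGIONSERVER_OPTS="-Xmn204m -XX:CMSInitiatingOccupancyFraction=70  -Xms1024m -Xmx1024m"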
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..61fe62f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file

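hbaseSmokeVerify.sh itself is not part of this hunk; conceptually it re-scans the
table and looks for the unique {{service_check_data}} token written above. A rough
Python sketch of that check, as an assumption about its behavior:

    import subprocess

    def smoke_row_present(conf_dir, token):
        # re-scan the smoke table and look for the unique token
        shell = subprocess.Popen(["hbase", "--config", conf_dir, "shell"],
                                 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        out = shell.communicate("scan 'ambarismoketest'\nexit\n")[0]
        return token in out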
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..3b3bb18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..9102d35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

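For reference, the permission string rendered here ('RWXCA' by default, from
smokeuser_permissions in params.py) is HBase's ACL shorthand: R(ead), W(rite),
X(execute), C(reate) and A(dmin). With a hypothetical smoke user the template
renders to:

    grant 'ambari-qa', 'RWXCA'
    exit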
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..9cf35d3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..bd1d727
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/regionservers.j2
new file mode 100644
index 0000000..b22ae5f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/regionservers.j2
@@ -0,0 +1,2 @@
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file

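Rendered with rs_hosts = ['rs1.example.com', 'rs2.example.com'] (hypothetical
hosts), the loop above emits one region server host per line:

    rs1.example.com
    rs2.example.com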
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/configuration/global.xml
deleted file mode 100644
index b0c7eb6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/configuration/global.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 818914e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HCATALOG service</comment>
-    <version>0.11.0.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..4fd0b22
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,305 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hadoop.root.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>.</value>
+  </property>
+  <property>
+    <name>hadoop.log.file</name>
+    <value>hadoop.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hadoop.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshhold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.taskid</name>
+    <value>null</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.iscleanup</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.noKeepSplits</name>
+    <value>4</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.totalLogFileSize</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.purgeLogSplits</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.logsRetainHours</name>
+    <value>12</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA</name>
+    <value>org.apache.hadoop.mapred.TaskLogAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.taskId</name>
+    <value>${hadoop.tasklog.taskid}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.isCleanup</name>
+    <value>${hadoop.tasklog.iscleanup}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.totalLogFileSize</name>
+    <value>${hadoop.tasklog.totalLogFileSize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.security.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.maxfilesize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.maxbackupindex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.category.SecurityLogger</name>
+    <value>${hadoop.security.logger}</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.file</name>
+    <value>SecurityAuth.audit</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.File</name>
+    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.File</name>
+    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxFileSize</name>
+    <value>${hadoop.security.log.maxfilesize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxBackupIndex</name>
+    <value>${hadoop.security.log.maxbackupindex}</value>
+  </property>
+  <property>
+    <name>hdfs.audit.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
+    <value>${hdfs.audit.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.File</name>
+    <value>${hadoop.log.dir}/hdfs-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>mapred.audit.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.mapred.AuditLogger</name>
+    <value>${mapred.audit.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.mapred.AuditLogger</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.File</name>
+    <value>${hadoop.log.dir}/mapred-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.File</name>
+    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxFileSize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxBackupIndex</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.metrics.log.level</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.metrics2</name>
+    <value>${hadoop.metrics.log.level}</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service</name>
+    <value>ERROR</value>
+  </property>
+  <property>
+    <name>log4j.appender.NullAppender</name>
+    <value>org.apache.log4j.varia.NullAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.log.metrics.EventCounter</value>
+  </property>
+
+</configuration>

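Each <property> in this config type corresponds to one line of the log4j.properties
file that ends up on the agent (the rendering code is not part of this hunk). For
example, the DRFA appender entries above flatten to:

    log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
    log4j.appender.DRFA.DatePattern=.yyyy-MM-dd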
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
index 99882bc..d29d2fc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
@@ -16,35 +16,132 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.2.0.1.3.2.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>1.2.0.1.3.3.0</version>
 
-    <components>
+      <components>
         <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>hdfs-site</config-type>
-      <config-type>hadoop-policy</config-type>
-    </configuration-dependencies>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-pipes</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-sbin</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

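The guard above boils down to "refuse to format if any configured NameNode
directory already has contents". The same check in Python, as an illustration only:

    import os

    def dirs_blocking_format(name_dirs):
        # return the NameNode directories that are non-empty
        return [d for d in name_dirs.split(',')
                if os.path.isdir(d) and os.listdir(d)]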
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkWebUI.py
new file mode 100644
index 0000000..f8e9c1a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+      
+
+if __name__ == "__main__":
+  main()

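A typical invocation (hypothetical hosts, classic NameNode web port) would be:

    python checkWebUI.py -m nn1.example.com,nn2.example.com -p 50070

The script exits non-zero as soon as any listed host fails to answer HTTP 200 on /.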
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..eaa27cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_datanode import datanode
+
+
+class DataNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    datanode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    datanode(action="stop")
+
+  def config(self, env):
+    import params
+
+    datanode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+
+if __name__ == "__main__":
+  DataNode().execute()

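For context, Script.execute() dispatches to one of the methods above based on the
arguments the Ambari agent passes on the command line, roughly of the form (the
paths are hypothetical):

    python datanode.py start /var/lib/ambari-agent/data/command-42.json \
        /var/lib/ambari-agent/cache/stacks/HDP/1.3.2/services/HDFS/package

so "start" invokes DataNode.start(), "status" invokes DataNode.status(), and so on.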
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..8180689
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,52 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class HdfsClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.configure(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def configure(self, env):
+    import params
+    # nothing to configure for the HDFS client beyond package install
+    pass
+
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..b033185
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_datanode.py
@@ -0,0 +1,60 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+import os
+
+def datanode(action=None):
+  import params
+
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              recursive=True,
+              mode=0750,
+              owner=params.hdfs_user,
+              group=params.user_group)
+    for data_dir in params.dfs_data_dir.split(","):
+      Directory(os.path.dirname(data_dir),
+                recursive=True,
+                mode=0755)
+      Directory(data_dir,
+                recursive=False,
+                mode=0750,
+                owner=params.hdfs_user,
+                group=params.user_group)
+
+  if action == "start" or action == "stop":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )
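The configure branch keeps each data dir at 0750 while its parent is created 0755, so non-HDFS users can traverse the mount point without reading block data. A plain-Python equivalent of that loop, with a sample comma-separated dfs.data.dir value:

    import os

    dfs_data_dir = "/grid/0/hadoop/hdfs/data,/grid/1/hadoop/hdfs/data"  # sample value

    for data_dir in dfs_data_dir.split(","):
      parent = os.path.dirname(data_dir)
      if not os.path.isdir(parent):
        os.makedirs(parent, 0755)  # parents traversable by everyone
      if not os.path.isdir(data_dir):
        os.mkdir(data_dir, 0750)   # block storage readable only by hdfs and its group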

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..d8e191f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py
@@ -0,0 +1,192 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def namenode(action=None, format=True):
+  import params
+
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if format:
+      format_namenode()
+    service(
+      action="start", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      create_pid_dir=True,
+      create_log_dir=True,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+    # TODO: extract creating of dirs to different services
+    create_app_directories()
+    create_user_directories()
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+  if action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+
+def create_app_directories():
+  import params
+
+  hdfs_directory(name="/tmp",
+                 owner=params.hdfs_user,
+                 mode="777"
+  )
+  #mapred directories
+  if params.has_jobtracker:
+    hdfs_directory(name="/mapred",
+                   owner=params.mapred_user
+    )
+    hdfs_directory(name="/mapred/system",
+                   owner=params.mapred_user
+    )
+    #hbase directories
+  if len(params.hbase_master_hosts) != 0:
+    hdfs_directory(name=params.hbase_hdfs_root_dir,
+                   owner=params.hbase_user
+    )
+    hdfs_directory(name=params.hbase_staging_dir,
+                   owner=params.hbase_user,
+                   mode="711"
+    )
+    #hive directories
+  if len(params.hive_server_host) != 0:
+    hdfs_directory(name=params.hive_apps_whs_dir,
+                   owner=params.hive_user,
+                   mode="777"
+    )
+  if len(params.hcat_server_hosts) != 0:
+    hdfs_directory(name=params.webhcat_apps_dir,
+                   owner=params.webhcat_user,
+                   mode="755"
+    )
+  if len(params.hs_host) != 0:
+    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+
+def create_user_directories():
+  import params
+
+  hdfs_directory(name=params.smoke_hdfs_user_dir,
+                 owner=params.smoke_user,
+                 mode=params.smoke_hdfs_user_mode
+  )
+
+  if params.has_hive_server_host:
+    hdfs_directory(name=params.hive_hdfs_user_dir,
+                   owner=params.hive_user,
+                   mode=params.hive_hdfs_user_mode
+    )
+
+  if params.has_hcat_server_host:
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      hdfs_directory(name=params.hcat_hdfs_user_dir,
+                     owner=params.hcat_user,
+                     mode=params.hcat_hdfs_user_mode
+      )
+    hdfs_directory(name=params.webhcat_hdfs_user_dir,
+                   owner=params.webhcat_user,
+                   mode=params.webhcat_hdfs_user_mode
+    )
+
+  if params.has_oozie_server:
+    hdfs_directory(name=params.oozie_hdfs_user_dir,
+                   owner=params.oozie_user,
+                   mode=params.oozie_hdfs_user_mode
+    )
+
+
+def format_namenode(force=None):
+  import params
+
+  mark_dir = params.namenode_formatted_mark_dir
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if force:
+    ExecuteHadoop('namenode -format',
+                  kinit_override=True)
+  else:
+    File('/tmp/checkForFormat.sh',
+         content=StaticFile("checkForFormat.sh"),
+         mode=0755)
+    Execute(format(
+      "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
+      "{dfs_name_dir}"),
+            not_if=format("test -d {mark_dir}"),
+            path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
+  Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=params.user_group
+  )
+
+  ExecuteHadoop('dfsadmin -refreshNodes',
+                user=hdfs_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
\ No newline at end of file
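format_namenode above is guarded twice: checkForFormat.sh runs only while the marker directory is absent (not_if), and the marker is then created so the format can never run a second time. The same one-shot pattern in plain Python, with an assumed marker path (the resource version creates the marker unconditionally):

    import os
    import subprocess

    mark_dir = "/var/run/hadoop/hdfs/namenode/formatted"  # assumed marker path

    if not os.path.isdir(mark_dir):
      subprocess.check_call(["sh", "/tmp/checkForFormat.sh"])  # raises on non-zero exit
      os.makedirs(mark_dir)  # marker blocks any later re-format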

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..a943455
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    Directory(params.fs_checkpoint_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+  elif action == "start":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )
+  elif action == "stop":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..80700c8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,66 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_namenode import namenode
+
+
+class NameNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    namenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="decommission")
+
+
+if __name__ == "__main__":
+  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
new file mode 100644
index 0000000..e172501
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
@@ -0,0 +1,170 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+import os
+
+config = Script.get_config()
+
+if System.get_instance().os_type == "oraclelinux":
+  ulimit_cmd = ''
+else:
+  ulimit_cmd = "ulimit -c unlimited && if [ `ulimit -c` != 'unlimited' ]; then exit 77; fi && "
+
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_historyserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(nagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = status_params.hdfs_user
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group = "users"
+
+#hadoop params
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+hadoop_bin = "/usr/lib/hadoop/bin"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+
+dfs_domain_socket_path = "/var/lib/hadoop-hdfs/dn_socket"
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']  # e.g. /grid/0/hdfs/journal
+
+# HDP 2.x reads 'dfs.namenode.name.dir'; this 1.3.2 stack uses 'dfs.name.dir'
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.name.dir']  # e.g. /tmp/hadoop-hdfs/dfs/name
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']  # e.g. /apps/hbase/data
+hbase_staging_dir = "/apps/hbase/staging"
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]  # e.g. /apps/hive/warehouse
+webhcat_apps_dir = "/apps/webhcat"
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']  # e.g. /app-logs
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']  # e.g. /mr-history/done
+
+if has_oozie_server:
+  oozie_hdfs_user_dir = format("/user/{oozie_user}")
+  oozie_hdfs_user_mode = 775
+if has_hcat_server_host:
+  hcat_hdfs_user_dir = format("/user/{hcat_user}")
+  hcat_hdfs_user_mode = 755
+  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+  webhcat_hdfs_user_mode = 755
+if has_hive_server_host:
+  hive_hdfs_user_dir = format("/user/{hive_user}")
+  hive_hdfs_user_mode = 700
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 770
+
+namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
+
+# HDP 2.x reads hdfs-site 'dfs.namenode.checkpoint.dir'; 1.3.2 reads core-site 'fs.checkpoint.dir'
+fs_checkpoint_dir = config['configurations']['core-site']['fs.checkpoint.dir']  # e.g. /tmp/hadoop-hdfs/dfs/namesecondary
+
+# HDP 2.x reads 'dfs.datanode.data.dir'; 1.3.2 uses 'dfs.data.dir'
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir']  # e.g. /tmp/hadoop-hdfs/dfs/data
+
+
+
+
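Most lookups in params.py go through two resource_management helpers: format(), which fills {name} placeholders from the caller's scope, and default(), which walks a '/'-separated path into the command JSON and returns a fallback for missing keys. A minimal stand-in for default() (the real helper reads Script.get_config() itself; config is a parameter here only to keep the sketch self-contained):

    def default(config, path, fallback):
      # walk a '/'-separated path into nested dicts, e.g. "/clusterHostInfo/rm_host"
      node = config
      for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
          return fallback
        node = node[key]
      return node

    config = {"clusterHostInfo": {"slave_hosts": ["host1", "host2"]}}
    print default(config, "/clusterHostInfo/slave_hosts", [])  # ['host1', 'host2']
    print default(config, "/clusterHostInfo/rm_host", [])      # []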


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/service_check.py
new file mode 100644
index 0000000..6b3553d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/service_check.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ZookeeperServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File("/tmp/zkSmoke.sh",
+         mode=0755,
+         content=StaticFile('zkSmoke.sh')
+    )
+
+    cmd_quorum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
+                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
+                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd_quorum,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+if __name__ == "__main__":
+  ZookeeperServiceCheck().execute()
\ No newline at end of file
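The quorum command mixes names resolved from the surrounding scope ({smoke_script}, {clientPort}) with one explicit keyword (smokeUserKeytab=...). A sketch of that resolution order, assuming keyword arguments take precedence over the caller's variables (illustrative, not the resource_management implementation):

    import inspect

    def format_(template, **kwargs):
      caller = inspect.currentframe().f_back
      names = dict(caller.f_globals)
      names.update(caller.f_locals)
      names.update(kwargs)  # explicit keywords win over scope lookups
      return template.format(**names)

    security_enabled = False
    smoke_user_keytab = "/etc/security/keytabs/smokeuser.headless.keytab"
    cmd = format_("sh /tmp/zkSmoke.sh {keytab}",
                  keytab=smoke_user_keytab if security_enabled else "no_keytab")
    print cmd  # sh /tmp/zkSmoke.sh no_keytab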

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/status_params.py
new file mode 100644
index 0000000..98f2903
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+zk_pid_dir = config['configurations']['global']['zk_pid_dir']
+zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper.py
new file mode 100644
index 0000000..7b74b4c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+
+def zookeeper(type=None):
+  import params
+
+  Directory(params.config_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  configFile("zoo.cfg", template_name="zoo.cfg.j2")
+  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
+  configFile("configuration.xsl", template_name="configuration.xsl.j2")
+
+  Directory(params.zk_pid_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_log_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_data_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  if type == 'server':
+    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
+
+    File(format("{zk_data_dir}/myid"),
+         mode = 0644,
+         content = myid
+    )
+
+  if params.log4j_props is not None:
+    PropertiesFile('log4j.properties',
+      dir=params.config_dir,
+      properties=params.log4j_props,
+      mode=0664,
+      owner=params.zk_user,
+      group=params.user_group,
+    )
+  elif os.path.exists(format("{params.config_dir}/log4j.properties")):
+    File(format("{params.config_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.zk_user
+    )
+
+  if params.security_enabled:
+    if type == "server":
+      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
+    configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+
+  File(format("{config_dir}/zoo_sample.cfg"),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+def configFile(name, template_name=None):
+  import params
+
+  File(format("{config_dir}/{name}"),
+       content=Template(template_name),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+
+
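Each server derives its myid as its 1-based position in the sorted host list, so all nodes agree on the numbering without coordination. For example:

    zookeeper_hosts = ["c6403.example.com", "c6401.example.com", "c6402.example.com"]
    hostname = "c6402.example.com"

    myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
    print myid  # "2": second host in sorted order

Note that zoo.cfg.j2 numbers its server.N lines by loop.index over zookeeper_hosts in list order, so the two agree only when the host list arrives already sorted.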

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_client.py
new file mode 100644
index 0000000..028a37d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+
+class ZookeeperClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    zookeeper(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py
new file mode 100644
index 0000000..e8cc264
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+from zookeeper_service import zookeeper_service
+
+class ZookeeperServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    zookeeper(type='server')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    zookeeper_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    zookeeper_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.zk_pid_file)
+
+if __name__ == "__main__":
+  ZookeeperServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_service.py
new file mode 100644
index 0000000..83b8f08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_service.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def zookeeper_service(action='start'):
+  import params
+
+  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
+
+  if action == 'start':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
+    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.zk_user
+    )
+  elif action == 'stop':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
+    rm_pid = format("rm -f {zk_pid_file}")
+    Execute(daemon_cmd,
+            user=params.zk_user
+    )
+    Execute(rm_pid)
\ No newline at end of file
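The start branch is skipped (not_if) when the pid file exists and the recorded process is still alive. An equivalent liveness check in Python, with an assumed pid-file path:

    import os

    def zk_running(pid_file="/var/run/zookeeper/zookeeper_server.pid"):  # assumed path
      try:
        with open(pid_file) as f:
          pid = int(f.read().strip())
        os.kill(pid, 0)  # signal 0 probes existence; nothing is delivered
        return True
      except (IOError, ValueError, OSError):
        return False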

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/configuration.xsl.j2
new file mode 100644
index 0000000..ca498b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/configuration.xsl.j2
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+  <tr>
+     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+     <td><xsl:value-of select="value"/></td>
+     <td><xsl:value-of select="description"/></td>
+  </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zoo.cfg.j2
new file mode 100644
index 0000000..5b68218
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zoo.cfg.j2
@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The number of milliseconds of each tick
+tickTime={{tickTime}}
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit={{initLimit}}
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit={{syncLimit}}
+# the directory where the snapshot is stored.
+dataDir={{zk_data_dir}}
+# the port at which the clients will connect
+clientPort={{clientPort}}
+{% for host in zookeeper_hosts %}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
+
+{% if security_enabled %}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}
+
+{% if zoo_cfg_properties_map_length > 0 %}
+# Custom properties
+{% endif %}
+{% for key, value in zoo_cfg_properties_map.iteritems() %}
+{{key}}={{value}}
+{% endfor %}
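The server.N entries use loop.index, Jinja2's 1-based loop counter. Rendering just that loop standalone shows the output shape (requires the jinja2 package):

    from jinja2 import Template

    tmpl = Template("{% for host in zookeeper_hosts %}"
                    "server.{{ loop.index }}={{ host }}:2888:3888\n"
                    "{% endfor %}")
    print tmpl.render(zookeeper_hosts=["zk1.example.com", "zk2.example.com"])
    # server.1=zk1.example.com:2888:3888
    # server.2=zk2.example.com:2888:3888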

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
new file mode 100644
index 0000000..493a2a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export JAVA_HOME={{java64_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
new file mode 100644
index 0000000..c70449d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
@@ -0,0 +1,22 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
new file mode 100644
index 0000000..639cdaa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{zk_keytab_path}}"
+principal="{{zk_principal}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh
deleted file mode 100644
index 4872a10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index 51e5cd2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-##!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_users()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index fa19ca3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-zk_user = config['configurations']['global']['zk_user']
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group =  "users"
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 26a7592..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  Group(params.user_group)
-  Group(params.smoke_user_group)
-  Group(params.proxyuser_group)
-  User(params.smoke_user,
-       gid=params.user_group,
-       groups=[params.proxyuser_group]
-  )
-  smoke_user_dirs = format(
-    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-  set_uid(params.smoke_user, smoke_user_dirs)
-
-  if params.has_hbase_masters:
-    User(params.hbase_user,
-         gid = params.user_group,
-         groups=[params.user_group])
-    hbase_user_dirs = format(
-      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-    set_uid(params.hbase_user, hbase_user_dirs)
-
-  if params.has_nagios:
-    Group(params.nagios_group)
-    User(params.nagios_user,
-         gid=params.nagios_group)
-
-  if params.has_oozie_server:
-    User(params.oozie_user,
-         gid = params.user_group)
-
-  if params.has_hcat_server_host:
-    User(params.webhcat_user,
-         gid = params.user_group)
-    User(params.hcat_user,
-         gid = params.user_group)
-
-  if params.has_hive_server_host:
-    User(params.hive_user,
-         gid = params.user_group)
-
-  if params.has_resourcemanager:
-    User(params.yarn_user,
-         gid = params.user_group)
-
-  if params.has_ganglia_server:
-    Group(params.gmetad_user)
-    Group(params.gmond_user)
-    User(params.gmond_user,
-         gid=params.user_group,
-        groups=[params.gmond_user])
-    User(params.gmetad_user,
-         gid=params.user_group,
-        groups=[params.gmetad_user])
-
-  User(params.hdfs_user,
-        gid=params.user_group,
-        groups=[params.user_group]
-  )
-  User(params.mapred_user,
-       gid=params.user_group,
-       groups=[params.user_group]
-  )
-  if params.has_zk_host:
-    User(params.zk_user,
-         gid=params.user_group)
-
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  File("/tmp/changeUid.sh",
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
-          not_if = format("test $(id -u {user}) -gt 1000"))
-
-def install_packages():
-  Package("unzip")
-  Package("net-snmp")
-  Package("net-snmp-utils")
\ No newline at end of file
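
The set_uid helper above shows the idiom these hooks rely on for idempotence: every shell action is an Execute wrapped in a not_if (or only_if) guard, so re-running a hook is a no-op. A minimal stand-alone sketch of that guard in plain Python (the user name and uid threshold in the usage comment are illustrative, not part of Ambari's API):

    import subprocess

    def guarded_execute(command, not_if=None):
        # Run `command` only when the `not_if` guard exits non-zero,
        # mirroring resource_management's Execute(not_if=...) semantics.
        if not_if is not None and subprocess.call(not_if, shell=True) == 0:
            return  # guard passed: work already done, skip the command
        subprocess.check_call(command, shell=True)

    # usage, mirroring set_uid above (script path illustrative):
    #   guarded_execute("/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa",
    #                   not_if="test $(id -u ambari-qa) -gt 1000")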

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
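
The core of checkForFormat.sh is the emptiness test over the comma-separated name dirs: the NameNode is formatted only when every configured name dir is absent or empty. The same check as a small Python sketch (paths illustrative):

    import os

    def dirs_blocking_format(name_dirs):
        # Mirrors the shell loop above: collect any configured name dir
        # that already exists and contains files.
        return [d for d in name_dirs.split(",")
                if os.path.isdir(d) and os.listdir(d)]

    non_empty = dirs_blocking_format("/hadoop/hdfs/namenode,/data/hdfs/namenode")
    if non_empty:
        print("will not format; non-empty dirs: " + " ".join(non_empty))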

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index c8939fc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py
deleted file mode 100644
index e11bfac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_java()
-    setup_hadoop()
-    setup_configs()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()
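
Every hook in these stacks follows the same shape: a Hook subclass imports its params module, publishes it through env.set_params, and then calls the setup_* functions, which re-import params and read the shared values. Conceptually, set_params exposes the module's public names for later format() substitution; a rough sketch of that behavior (an assumption about the library's internals, not its source):

    class Environment(object):
        def __init__(self):
            self.params = {}

        def set_params(self, module):
            # Publish every public attribute of the params module so
            # that later format("{java_home}/bin/java") calls resolve.
            self.params.update((k, v) for k, v in vars(module).items()
                               if not k.startswith("_"))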

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py
deleted file mode 100644
index 211c2bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#java params
-artifact_dir = "/tmp/HDP-artifacts/"
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#users and groups
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-yarn_user = config['configurations']['global']['yarn_user']
-
-user_group = config['configurations']['global']['user_group']
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#snmp
-snmp_conf_dir = "/etc/snmp/"
-snmp_source = "0.0.0.0/0"
-snmp_community = "hadoop"
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(nagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-#hadoop params
-hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/bin"
-
-task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-limits_conf_dir = "/etc/security/limits.d"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url']
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
-
-rca_enabled = config['configurations']['global']['rca_enabled']
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-if System.get_instance().os_family == "suse":
-  jsvc_path = "/usr/lib/bigtop-utils"
-else:
-  jsvc_path = "/usr/libexec/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-#hdfs ha properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-namenode_id = None
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namenode_ids.split(","):
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-rca_property_map = {
-  'ambari.jobhistory.database': ambari_db_rca_url,
-  'ambari.jobhistory.driver': ambari_db_rca_driver,
-  'ambari.jobhistory.user': ambari_db_rca_username,
-  'ambari.jobhistory.password': ambari_db_rca_password,
-  'ambari.jobhistory.logger': 'DEBUG,JHA',
-
-  'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
-  'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
-  'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
-  'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
-  'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
-
-  'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
-  'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
-}
-
-if ('hdfs-log4j' in config['configurations']):
-  log4j_props = config['configurations']['hdfs-log4j']
-  if 'mapreduce-log4j' in config['configurations']:
-    log4j_props.update(config['configurations']['mapreduce-log4j'])
-else:
-  log4j_props = None
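
All of the host lists above are read through default(), which walks the command JSON by a "/"-separated path and falls back to the given value when a key is missing. A self-contained sketch of that lookup (the real helper resolves the path against Script.get_config() implicitly; here config is passed in explicitly):

    def default(config, path, default_value):
        # Walk "/clusterHostInfo/rm_host"-style paths through nested dicts.
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return default_value
            node = node[key]
        return node

    # e.g. against a trimmed command JSON:
    cfg = {"clusterHostInfo": {"rm_host": ["rm1.example.com"]}}
    assert default(cfg, "/clusterHostInfo/rm_host", []) == ["rm1.example.com"]
    assert default(cfg, "/clusterHostInfo/zookeeper_hosts", []) == []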

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index d6521ee..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,317 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_java():
-  """
-  Installs the JDK using parameters that come from ambari-server
-  """
-  import params
-
-  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
-  java_dir = os.path.dirname(params.java_home)
-  java_exec = format("{java_home}/bin/java")
-  
-  if not params.jdk_name:
-    return
-  
-  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}"))
-
-  if params.jdk_name.endswith(".bin"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
-  elif params.jdk_name.endswith(".gz"):
-    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
-  
-  Execute(install_cmd,
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}")
-  )
-  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
-  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
-  Execute( download_jce,
-        path = ["/bin","/usr/bin/"],
-        not_if =format("test -e {jce_curl_target}"),
-        ignore_failures = True
-  )
-  
-  if params.security_enabled:
-    security_dir = format("{java_home}/jre/lib/security")
-    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
-    Execute(extract_cmd,
-          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
-          cwd  = security_dir,
-          path = ['/bin/','/usr/bin']
-    )
-
-def setup_hadoop():
-  """
-  Sets up Hadoop files and directories
-  """
-  import params
-
-  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
-       content=Template("snmpd.conf.j2"))
-  Service("snmpd",
-          action = "restart")
-
-  Execute("/bin/echo 0 > /selinux/enforce",
-          only_if="test -f /selinux/enforce"
-  )
-
-  install_snappy()
-
-  #directories
-  Directory(params.hadoop_conf_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hdfs_log_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hadoop_pid_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-
-  #files
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if tc_mode:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
-    File(os.path.join(params.hadoop_conf_dir, file),
-         owner=tc_owner,
-         content=Template(file + ".j2")
-    )
-
-  health_check_template = "health_check" #for stack 1 use 'health_check'
-  File(os.path.join(params.hadoop_conf_dir, "health_check"),
-       owner=tc_owner,
-       content=Template(health_check_template + ".j2")
-  )
-
-  log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-  if (params.log4j_props != None):
-    PropertiesFile(log4j_filename,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.hdfs_user,
-                   group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-    File(log4j_filename,
-         mode=0644,
-         group=params.user_group,
-         owner=params.hdfs_user,
-    )
-
-  update_log4j_props(log4j_filename)
-
-  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-       owner=params.hdfs_user,
-       content=Template("hadoop-metrics2.properties.j2")
-  )
-
-  db_driver_dload_cmd = ""
-  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-
-  if db_driver_dload_cmd:
-    Execute(db_driver_dload_cmd,
-            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
-    )
-
-
-def setup_configs():
-  """
-  Creates configs for the HDFS and MapReduce services
-  """
-  import params
-
-  if "mapred-queue-acls" in params.config['configurations']:
-    XmlConfig("mapred-queue-acls.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'mapred-queue-acls'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-  elif os.path.exists(
-      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
-    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-
-  File(params.task_log4j_properties_location,
-       content=StaticFile("task-log4j.properties"),
-       mode=0755
-  )
-
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  # if params.stack_version[0] == "1":
-  Link('/usr/lib/hadoop/lib/hadoop-tools.jar',
-       to = '/usr/lib/hadoop/hadoop-tools.jar'
-  )
-
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
-    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-    File(os.path.join(params.hadoop_conf_dir, 'masters'),
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  # generate_include_file()
-
-def update_log4j_props(file):
-  import params
-
-  for key in params.rca_property_map:
-    value = params.rca_property_map[key]
-    Execute(format(
-      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}")
-    )
-
-
-def generate_include_file():
-  import params
-
-  if params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-
-def install_snappy():
-  import params
-
-  snappy_so = "libsnappy.so"
-  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
-  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
-  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
-  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
-  so_src_dir_x86 = format("{hadoop_home}/lib")
-  so_src_dir_x64 = format("{hadoop_home}/lib64")
-  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
-  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-  Execute(
-    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-  Execute(
-    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 77e458f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index bb5795b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
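
This template, like include_hosts_list.j2 and slaves.j2 further down, is just a host-per-line Jinja loop; rendering one outside Ambari takes two lines (host names illustrative):

    from jinja2 import Template

    tmpl = Template("{% for host in hdfs_exclude_file %}{{host}}\n{% endfor %}")
    print(tmpl.render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]))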

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2
deleted file mode 100644
index 51e2bac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2
+++ /dev/null
@@ -1,121 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-# this is different for HDP1 #
-# Path to jsvc required by secure HDP 2.0 datanode
-# export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER={{hdfs_user}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS={{mapreduce_libs_path}}
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
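
The two for-loops near the end of this template simply splice any installed MySQL/Oracle connector jars onto HADOOP_CLASSPATH. The same collection step in Python, for clarity:

    import glob

    def jdbc_classpath():
        # Pick up whichever connector jars happen to be present.
        jars = (glob.glob("/usr/share/java/*mysql*") +
                glob.glob("/usr/share/java/*ojdbc*"))
        return "".join(":" + j for j in jars)

    print("HADOOP_CLASSPATH suffix: " + jdbc_classpath())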

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index a6a66ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
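
For quick reference when reading the template, the per-daemon Ganglia sink ports it wires up (values copied from the lines above):

    GANGLIA_SINK_PORTS = {
        "namenode": 8661, "datanode": 8659, "jobtracker": 8662,
        "tasktracker": 8658, "maptask": 8660, "reducetask": 8660,
        "resourcemanager": 8664, "nodemanager": 8657,
        "historyserver": 8666, "journalnode": 8654,
    }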

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2
deleted file mode 100644
index ca7baa2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile 32768
-{{hdfs_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2
deleted file mode 100644
index cb7b12b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
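
Both health_check templates share this aggregation loop: run each check_<name> function, collect failures into an ERROR bucket and successes into an OK bucket, and print both. The same shape in Python, with a trivial stand-in disk check (the threshold is illustrative):

    import os

    def run_checks(checks):
        ok, err = [], []
        for name, check in checks:
            passed, msg = check()
            (ok if passed else err).append(msg)
        line = "ERROR " + ",".join(err) + " " if err else ""
        return line + ("OK: " + ",".join(ok) if ok else "")

    def check_disks():
        st = os.statvfs("/")
        return st.f_bavail > 0, "disks ok" if st.f_bavail > 0 else "/ full"

    print(run_checks([("disks", check_disks)]))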

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index b84b336..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
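
Relative to health_check-v2.j2, this template adds check_taskcontroller and check_jetty; the latter polls the TaskTracker's /jmx servlet and fails once shuffle_exceptions_caught exceeds 10. A Python 2 sketch of that probe (the JSON shape of the servlet response and the host/port arguments are assumptions about the runtime environment, not guaranteed by this template):

    import json
    import urllib2

    def check_shuffle_exceptions(host, port, limit=10):
        url = ("http://%s:%d/jmx?qry=Hadoop:service=TaskTracker,"
               "name=ShuffleServerMetrics" % (host, port))
        beans = json.load(urllib2.urlopen(url, timeout=5)).get("beans", [])
        caught = beans[0].get("shuffle_exceptions_caught", 0) if beans else 0
        return caught <= limit, caught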

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}


[12/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index acb2522..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1349 +0,0 @@
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-# You can specify individual object config files as shown below:
-cfg_file=/etc/nagios/objects/commands.cfg
-cfg_file=/etc/nagios/objects/contacts.cfg
-cfg_file=/etc/nagios/objects/timeperiods.cfg
-cfg_file=/etc/nagios/objects/templates.cfg
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file=/etc/nagios/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file=/etc/nagios/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file=/etc/nagios/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file=/etc/nagios/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir=/etc/nagios/servers
-#cfg_dir=/etc/nagios/printers
-#cfg_dir=/etc/nagios/switches
-#cfg_dir=/etc/nagios/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can speed up the time needed to (re)start 
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-# restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (e.g. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# be loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that  a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is the directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) which check
-# result files are considered to be valid.  Files older than this 
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic test
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of service when it predicts that future dependency logic test
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option. 
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc, but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular interval, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.
-# If you want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ANDs of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of 
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host check is or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# process_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=1
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# and detect hosts and services that are "flapping".  
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enabled tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=0
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#          0 = Nothing
-#	   1 = Functions
-#          2 = Configuration
-#          4 = Process information
-#	   8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#          128 = External commands
-#          256 = Commands
-#          512 = Scheduled downtime
-#          1024 = Comments
-#          2048 = Macros
-
-debug_level=0
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-
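
Since nagios.cfg is a flat file of key=value directives where some keys
(cfg_file, for instance) repeat, a rendered copy can be sanity-checked by
parsing it into a dict of lists. A minimal sketch, assuming only the file
path; this is illustrative and not part of the Ambari code:

# Minimal sketch: parse a rendered nagios.cfg, preserving repeated
# directives such as cfg_file. The path below is an assumption.
from collections import defaultdict

def parse_nagios_cfg(path):
    directives = defaultdict(list)
    with open(path) as cfg:
        for line in cfg:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blank lines and comments
            key, _, value = line.partition("=")
            directives[key.strip()].append(value.strip())
    return directives

cfg = parse_nagios_cfg("/etc/nagios/nagios.cfg")
print(cfg["cfg_file"])   # every object config file listed above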

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index d8936a0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
-
-<Directory "/usr/lib/nagios/cgi">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "/usr/share/nagios"
-
-<Directory "/usr/share/nagios">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
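
Both Directory blocks above require Basic auth against
/etc/nagios/htpasswd.users, so that file must exist before the web UI is
usable. One way to create it is the htpasswd utility from the Apache tools;
a minimal sketch (assumes httpd-tools/apache2-utils is installed, and the
user name and password are placeholders, not necessarily how Ambari
provisions the file):

# Minimal sketch: create the AuthUserFile referenced above.
# -c creates the file, -b takes the password on the command line.
import subprocess

subprocess.check_call([
    "htpasswd", "-cb", "/etc/nagios/htpasswd.users",
    "nagiosadmin", "example-password",  # placeholder credentials
])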

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index 01e21ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system 
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="/etc/nagios/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?
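
With this init script in place, service control from Python reduces to
shelling out to it, and the configtest action allows the configuration to be
validated before a (re)start. A minimal sketch using subprocess (the script
path matches the template above; this is illustrative, not the
resource_management implementation):

# Minimal sketch: validate the Nagios config, then start the daemon
# if it is not already running. Illustrative only.
import subprocess

INIT_SCRIPT = "/etc/init.d/nagios"

subprocess.check_call([INIT_SCRIPT, "configtest"])  # non-zero exit on bad config
if subprocess.call([INIT_SCRIPT, "status"]) != 0:   # daemon not running
    subprocess.check_call([INIT_SCRIPT, "start"])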

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 23c7a56..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-log4j.xml
deleted file mode 100644
index 169085c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-log4j.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>log4j.appender.oozie</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.oozie.DatePattern</name>
-    <value>.yyyy-MM-dd-HH</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.File</name>
-    <value>${oozie.log.dir}/oozie.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.oozieops</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.File</name>
-    <value>${oozie.log.dir}/oozie-ops.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.File</name>
-    <value>${oozie.log.dir}/oozie-instrumentation.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.File</name>
-    <value>${oozie.log.dir}/oozie-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.File</name>
-    <value>${oozie.log.dir}/oozie-jpa.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.logger.openjpa</name>
-    <value>INFO, openjpa</value>
-  </property>
-  <property>
-    <name>log4j.logger.oozieops</name>
-    <value>INFO, oozieops</value>
-  </property>
-  <property>
-    <name>log4j.logger.oozieinstrumentation</name>
-    <value>ALL, oozieinstrumentation</value>
-  </property>
-  <property>
-    <name>log4j.logger.oozieaudit</name>
-    <value>ALL, oozieaudit</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.oozie</name>
-    <value>INFO, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop</name>
-    <value>WARN, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.mortbay</name>
-    <value>WARN, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.hsqldb</name>
-    <value>WARN, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.security.authentication.server</name>
-    <value>INFO, oozie</value>
-  </property>
-
-</configuration>
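
Each name/value pair above becomes one line of oozie-log4j.properties; the
stack's PropertiesFile resource (see oozie.py further down in this patch)
performs the rendering. A minimal sketch of that mapping, using a
hypothetical excerpt of the pairs as a plain dict:

    # Sketch only: render Ambari log4j config pairs into a properties file.
    # The dict is an excerpt of the oozie-log4j pairs above.
    log4j_props = {
      'log4j.appender.oozie': 'org.apache.log4j.DailyRollingFileAppender',
      'log4j.appender.oozie.File': '${oozie.log.dir}/oozie.log',
      'log4j.logger.org.apache.oozie': 'INFO, oozie',
    }

    with open('oozie-log4j.properties', 'w') as f:
      for name in sorted(log4j_props):
        f.write('%s=%s\n' % (name, log4j_props[name]))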

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index c149fa4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,329 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.security.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled, any user can manage the Oozie system and any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means infinite timeout
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie DataBase Name
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
-      #AUTHENTICATION_HANDLER_CLASSNAME#.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted NameNode for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to a workflow application if its job properties set
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, so users do not need to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>
-      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      DEFAULT
-    </value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-      the Oozie configuration directory; though the path can be absolute (i.e. pointing
-      to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>
-      org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor
-    </value>
-    <description>
-      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
-      be used in workflows. This property is a convenience property for adding extensions to the built-in executors without
-      having to include all the built-in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>
-      shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd
-    </value>
-    <description>
-      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a one-space string; the service
-      trims the value, and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates Oozie DB.
-
-      If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
-      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      Database user name to use to connect to the database
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value></value>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-      and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.services</name>
-    <value>
-      org.apache.oozie.service.SchedulerService,
-      org.apache.oozie.service.InstrumentationService,
-      org.apache.oozie.service.CallableQueueService,
-      org.apache.oozie.service.UUIDService,
-      org.apache.oozie.service.ELService,
-      org.apache.oozie.service.AuthorizationService,
-      org.apache.oozie.service.UserGroupInformationService,
-      org.apache.oozie.service.HadoopAccessorService,
-      org.apache.oozie.service.URIHandlerService,
-      org.apache.oozie.service.MemoryLocksService,
-      org.apache.oozie.service.DagXLogInfoService,
-      org.apache.oozie.service.SchemaService,
-      org.apache.oozie.service.LiteWorkflowAppService,
-      org.apache.oozie.service.JPAService,
-      org.apache.oozie.service.StoreService,
-      org.apache.oozie.service.CoordinatorStoreService,
-      org.apache.oozie.service.SLAStoreService,
-      org.apache.oozie.service.DBLiteWorkflowStoreService,
-      org.apache.oozie.service.CallbackService,
-      org.apache.oozie.service.ActionService,
-      org.apache.oozie.service.ActionCheckerService,
-      org.apache.oozie.service.RecoveryService,
-      org.apache.oozie.service.PurgeService,
-      org.apache.oozie.service.CoordinatorEngineService,
-      org.apache.oozie.service.BundleEngineService,
-      org.apache.oozie.service.DagEngineService,
-      org.apache.oozie.service.CoordMaterializeTriggerService,
-      org.apache.oozie.service.StatusTransitService,
-      org.apache.oozie.service.PauseTransitService,
-      org.apache.oozie.service.GroupsService,
-      org.apache.oozie.service.ProxyUserService
-    </value>
-    <description>List of Oozie services</description>
-  </property>
-  <property>
-    <name>oozie.service.URIHandlerService.uri.handlers</name>
-    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-    <description>
-      Lists the different URI handlers supported for data availability checks.
-    </description>
-  </property>
-  <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
-    </value>
-    <description>
-      To add/replace services defined in 'oozie.services' with custom implementations.
-      Class names must be separated by commas.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-    <description>
-      Command re-queue interval for push dependencies (in milliseconds).
-    </description>
-  </property>
-  <property>
-    <name>oozie.credentials.credentialclasses</name>
-    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-    <description>
-      Credential Class to be used for HCat.
-    </description>
-  </property>
-
-</configuration>
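
The pairs above are written back out as a Hadoop-style configuration file by
the XmlConfig resource (used in oozie.py below). A simplified sketch of that
serialization, with XML escaping omitted for brevity:

    # Sketch only: emit a <configuration> document from a property dict,
    # roughly what an XmlConfig-style resource produces.
    def to_configuration_xml(props):
      lines = ['<configuration>']
      for name in sorted(props):
        lines.append('  <property>')
        lines.append('    <name>%s</name>' % name)
        lines.append('    <value>%s</value>' % props[name])
        lines.append('  </property>')
      lines.append('</configuration>')
      return '\n'.join(lines)

    print(to_configuration_xml({'oozie.base.url': 'http://localhost:11000/oozie'}))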

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml
deleted file mode 100644
index 9a2f0b9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>OOZIE</name>
-      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>4.0.0.2.1.1</version>
-      <components>
-        <component>
-          <name>OOZIE_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>OOZIE_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>oozie.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>oozie-client.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>extjs-2.2-1</name>
-            </package>
-            <!--TODO: uncomment this after the package becomes available in the repo-->
-            <!--<package>-->
-              <!--<type>rpm</type>-->
-              <!--<name>falcon-0.4.0.2.0.6.0-76</name>-->
-            <!--</package>-->
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>oozie-site</config-type>
-        <config-type>oozie-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
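
The commandScript entries above point Ambari at Python scripts such as
scripts/oozie_server.py. A hedged sketch of the shape such a script takes
(method names follow the install/configure/start/stop commands the server
dispatches; the class and helper names here are illustrative):

    from resource_management import *

    class OozieServer(Script):
      def install(self, env):
        self.install_packages(env)

      def configure(self, env):
        import params
        env.set_params(params)

      def start(self, env):
        self.configure(env)

      def stop(self, env):
        pass

    if __name__ == "__main__":
      OozieServer().execute()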

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh
deleted file mode 100644
index 2cb5a7a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the couner and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-      elif [ "SUCCEEDED" == "$act_status" ]; then
-        rc=0;
-        break;
-      else
-        rc=1
-        break;
-      fi
-    done
-    return $rc
-}
-
-export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export kinit_path_local=$6
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
-su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-echo $cmd
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id"
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE
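
For reference, the polling done by checkOozieJobStatus above translates to
roughly the following Python (a sketch, not part of the patch):

    import subprocess, time

    def check_oozie_job_status(oozie_server, job_id, num_of_tries=10):
      cmd = ['/usr/bin/oozie', 'job', '-oozie', oozie_server, '-info', job_id]
      for _ in range(num_of_tries):
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
        status = [l.split(':', 1)[1].strip() for l in out.splitlines()
                  if l.startswith('Status') and ':' in l]
        if status and status[0] == 'SUCCEEDED':
          return 0                  # job finished successfully
        if not status or status[0] != 'RUNNING':
          return 1                  # failed or unknown state
        time.sleep(15)              # still RUNNING: wait and re-check
      return 1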

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh
deleted file mode 100644
index 97a513c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
-EC=$?
-echo $OUT
-GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
-if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
-then
-  exit 0
-else
-  exit $EC
-fi  
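
The wrapper's only job is to mask the "DB schema exists" failure from
ooziedb.sh. The same logic as a Python sketch:

    import subprocess

    def wrap_ooziedb(args):
      p = subprocess.Popen(['/usr/lib/oozie/bin/ooziedb.sh'] + list(args),
                           cwd='/var/tmp/oozie',
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
      out = p.communicate()[0]
      print(out)
      # A schema that already exists is not an error for our purposes.
      if p.returncode != 0 and 'java.lang.Exception: DB schema exists' in out:
        return 0
      return p.returncode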

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py
deleted file mode 100644
index 99b42ea..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-
-def oozie(is_server=False # TODO: see if we can remove this
-              ):
-  import params
-  #TODO hack for falcon el
-  oozie_site = dict(params.config['configurations']['oozie-site'])
-  oozie_site["oozie.services.ext"] = 'org.apache.oozie.service.JMSAccessorService,' + oozie_site["oozie.services.ext"]
-  XmlConfig( "oozie-site.xml",
-    conf_dir = params.conf_dir, 
-    configurations = oozie_site,
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0664
-  )
-  Directory( params.conf_dir,
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
-  )
-
-  if (params.log4j_props != None):
-    PropertiesFile('oozie-log4j.properties',
-      dir=params.conf_dir,
-      properties=params.log4j_props,
-      mode=0664,
-      owner=params.oozie_user,
-      group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
-    File(format("{params.conf_dir}/oozie-log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.oozie_user
-    )
-
-  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
-    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
-     -o {check_db_connection_jar_name}'"),
-      not_if  = format("[ -f {check_db_connection_jar} ]")
-    )
-    
-  oozie_ownership( )
-  
-  if is_server:      
-    oozie_server_specific( )
-  
-def oozie_ownership(
-):
-  import params
-  
-  File ( format("{conf_dir}/adminusers.txt"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/hadoop-config.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/oozie-default.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  Directory ( format("{conf_dir}/action-conf"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/action-conf/hive.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-def oozie_server_specific(
-):
-  import params
-  
-  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
-  Directory( oozie_server_directories,
-    owner = params.oozie_user,
-    mode = 0755,
-    recursive = True
-  )
-       
-  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
-  
-  # this is different for HDP1
-  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir} && mkdir -p {oozie_libext_dir} && cp {ext_js_path} {oozie_libext_dir}")
-  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
-    cmd3 += format(" && cp {jdbc_driver_jar} {oozie_libext_dir}")
-  #falcon el extension
-  if params.has_falcon_host:
-    cmd3 += format(' && cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-0.4.0.2.0.6.0-76.jar {oozie_libext_dir}')
-  # this is different for HDP1
-  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh prepare-war")
-  
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  Execute( [cmd1, cmd2, cmd3],
-    not_if  = no_op_test
-  )
-  Execute( cmd4,
-    user = params.oozie_user,
-    not_if  = no_op_test
-  )
-  


[49/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrd.py
new file mode 100644
index 0000000..3fe6901
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrd.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import cgi
+import os
+import rrdtool
+import sys
+import time
+import re
+import urlparse
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end,
+                resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+
+  args = [file, cf]
+
+  if start is not None:
+    args.extend(["-s", start])
+
+  if end is not None:
+    args.extend(["-e", end])
+
+  if resolution is not None:
+    args.extend(["-r", resolution])
+
+  rrdMetric = rrdtool.fetch(args)
+  # ds_name
+  sys.stdout.write(rrdMetric[1][0])
+  sys.stdout.write("\n")
+
+  sys.stdout.write(clusterName)
+  sys.stdout.write("\n")
+  sys.stdout.write(hostName)
+  sys.stdout.write("\n")
+  sys.stdout.write(metricName)
+  sys.stdout.write("\n")
+
+  # write time
+  sys.stdout.write(str(rrdMetric[0][0]))
+  sys.stdout.write("\n")
+  # write step
+  sys.stdout.write(str(rrdMetric[0][2]))
+  sys.stdout.write("\n")
+
+  if not pointInTime:
+    valueCount = 0
+    lastValue = None
+
+    for tuple in rrdMetric[2]:
+
+      thisValue = tuple[0]
+
+      if valueCount > 0 and thisValue == lastValue:
+        valueCount += 1
+      else:
+        if valueCount > 1:
+          sys.stdout.write("[~r]")
+          sys.stdout.write(str(valueCount))
+          sys.stdout.write("\n")
+
+        if thisValue is None:
+          sys.stdout.write("[~n]\n")
+        else:
+          sys.stdout.write(str(thisValue))
+          sys.stdout.write("\n")
+
+        valueCount = 1
+        lastValue = thisValue
+  else:
+    value = None
+    idx = -1
+    tuple = rrdMetric[2]
+    tupleLastIdx = len(tuple) * -1
+
+    while value is None and idx >= tupleLastIdx:
+      value = tuple[idx][0]
+      idx -= 1
+
+    if value is not None:
+      sys.stdout.write(str(value))
+      sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+  return
+
+
+def stripList(l):
+  return ([x.strip() for x in l])
+
+
+sys.stdout.write("Content-type: text/plain\n\n")
+
+# write start time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+requestMethod = os.environ['REQUEST_METHOD']
+
+if requestMethod == 'POST':
+  postData = sys.stdin.readline()
+  queryString = cgi.parse_qs(postData)
+  queryString = dict((k, v[0]) for k, v in queryString.items())
+elif requestMethod == 'GET':
+  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "/var/lib/ganglia/rrds/"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+if "pt" in queryString:
+  pointInTime = True
+else:
+  pointInTime = False
+
+
+def _walk(*args, **kwargs):
+  for root, dirs, files in os.walk(*args, **kwargs):
+    for dir in dirs:
+      qualified_dir = os.path.join(root, dir)
+      if os.path.islink(qualified_dir):
+        for x in os.walk(qualified_dir, **kwargs):
+          yield x
+    yield (root, dirs, files)
+
+
+for cluster in clusterParts:
+  for path, dirs, files in _walk(rrdPath + cluster):
+    pathParts = path.split("/")
+    # Process only paths which contain files. If no host parameter was passed,
+    # process all host folders and summary info; otherwise only that host's folder.
+    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
+      for metric in metricParts:
+        file = metric + ".rrd"
+        fileFullPath = os.path.join(path, file)
+        if os.path.exists(fileFullPath):
+          #Exact name of metric
+          printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                      os.path.join(path, file), cf, start, end, resolution,
+                      pointInTime)
+        else:
+          #Regex as metric name
+          metricRegex = metric + '\.rrd$'
+          p = re.compile(metricRegex)
+          matchedFiles = filter(p.match, files)
+          for matchedFile in matchedFiles:
+            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
+                        os.path.join(path, matchedFile), cf, start, end,
+                        resolution, pointInTime)
+
+sys.stdout.write("[~EOF]\n")
+# write end time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+sys.stdout.flush()
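
A hedged example of calling this CGI once it is deployed (host name and
cgi-bin path are assumptions; the parameter names match the queryString keys
parsed above):

    import urllib, urllib2

    params = urllib.urlencode({'c': 'HDPSlaves', 'h': 'host1.example.com',
                               'm': 'load_one', 'cf': 'AVERAGE'})
    url = 'http://ganglia-collector/cgi-bin/rrd.py?' + params
    print(urllib2.urlopen(url).read())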

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrdcachedLib.sh
new file mode 100644
index 0000000..8b7c257
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/rrdcachedLib.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
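
getRrdcachedRunningPid prints the logged pid only when a matching process is
actually alive. A Python analogue of that check (sketch only):

    import errno, os

    def running_pid(pid_file):
      try:
        pid = int(open(pid_file).read().strip())
      except (IOError, ValueError):
        return None
      try:
        os.kill(pid, 0)    # signal 0: existence check, nothing delivered
        return pid
      except OSError as e:
        return pid if e.errno == errno.EPERM else None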

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/setupGanglia.sh
new file mode 100644
index 0000000..5145b9c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/setupGanglia.sh
@@ -0,0 +1,141 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+  -o <owner>              Owner
+  -g <group>              Group
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+owner='root';
+group='root';
+
+while getopts ":c:mto:g:" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    o)
+      owner=${OPTARG};
+      ;;
+    g)
+      group=${OPTARG};
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod a+w ${GANGLIA_RUNTIME_DIR};
+chown ${owner}:${group} ${GANGLIA_CONF_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi
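
Typical invocations, as the Ganglia service scripts later in this patch
might drive them (flag meanings per the getopts string above; the cluster
name is illustrative):

    import subprocess

    # Generate gmetad configuration (-t always takes precedence over -c).
    subprocess.check_call(['sh', 'setupGanglia.sh', '-t',
                           '-o', 'root', '-g', 'hadoop'])
    # Generate master gmond configuration for one Ganglia cluster.
    subprocess.check_call(['sh', 'setupGanglia.sh', '-c', 'HDPSlaves', '-m',
                           '-o', 'root', '-g', 'hadoop'])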

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmetad.sh
new file mode 100644
index 0000000..ab5102d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmetad.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        for i in `seq 0 5`; do
+          gmetadRunningPid=`getGmetadRunningPid`;
+          if [ -n "${gmetadRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi
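
The start scripts in this patch share the same wait loop: poll for a pid up
to six times, one second apart, before declaring success or failure. As a
generic sketch:

    import time

    def wait_for_pid(get_pid, attempts=6, delay=1):
      # Mirrors the `for i in \`seq 0 5\`; ... sleep 1` loop above.
      for _ in range(attempts):
        pid = get_pid()
        if pid:
          return pid
        time.sleep(delay)
      return None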

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmond.sh
new file mode 100644
index 0000000..239b62e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startGmond.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+
+        for i in `seq 0 5`; do
+          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+          if [ -n "${gmondRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startRrdcached.sh
new file mode 100644
index 0000000..e79472b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/startRrdcached.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    # changed because of a problem puppet had with the nobody user
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b ${RRDCACHED_BASE_DIR} -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
+    # this, but it doesn't take sometimes due to a lack of permissions,
+    # so perform the operation explicitly to be super-sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    for i in `seq 0 5`; do
+      rrdcachedRunningPid=`getRrdcachedRunningPid`;
+      if [ -n "${rrdcachedRunningPid}" ]
+        then
+          break;
+      fi
+      sleep 1;
+    done
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi
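
Once rrdcached is up, the limited-access socket accepts only the FLUSH, STATS
and HELP commands (the -P option above). A sketch of poking it, assuming
GANGLIA_RUNTIME_DIR is /var/run/ganglia:

    import socket

    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect('/var/run/ganglia/rrdcached.limited.sock')  # assumed runtime dir
    s.sendall('STATS\n')
    print(s.recv(4096))
    s.close()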

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmetad.sh
new file mode 100644
index 0000000..2764e0e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmetad.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmond.sh
new file mode 100644
index 0000000..1af3eb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopGmond.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -KILL ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopRrdcached.sh
new file mode 100644
index 0000000..0a0d8d8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/stopRrdcached.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 
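
The fixed five-second sleep above papers over rrdcached's drain time; a
polling alternative, sketched in Python under the same assumption of a
get_rrdcached_pid helper (purely illustrative, not part of this patch):

    import time

    def wait_for_exit(get_rrdcached_pid, timeout=30, interval=1):
        # Return True once the process is gone, False if it outlives the timeout.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_rrdcached_pid() is None:
                return True
            time.sleep(interval)
        return False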

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/teardownGanglia.sh
new file mode 100644
index 0000000..b27f7a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/teardownGanglia.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia.py
new file mode 100644
index 0000000..52b79be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia.py
@@ -0,0 +1,106 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import os
+
+
+def groups_and_users():
+  import params
+
+  Group(params.user_group)
+  Group(params.gmetad_user)
+  Group(params.gmond_user)
+  User(params.gmond_user,
+       groups=[params.gmond_user])
+  User(params.gmetad_user,
+       groups=[params.gmetad_user])
+
+
+def config():
+  import params
+
+  shell_cmds_dir = params.ganglia_shell_cmds_dir
+  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
+                 'gmondLib.sh', 'rrdcachedLib.sh',
+                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
+                 'startRrdcached.sh', 'stopGmetad.sh',
+                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
+  Directory(shell_cmds_dir,
+            owner="root",
+            group="root",
+            recursive=True
+  )
+  init_file("gmetad")
+  init_file("gmond")
+  for sh_file in shell_files:
+    shell_file(sh_file)
+  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
+    ganglia_TemplateConfig(conf_file)
+
+
+def init_file(name):
+  import params
+
+  File("/etc/init.d/hdp-" + name,
+       content=StaticFile(name + ".init"),
+       mode=0755
+  )
+
+
+def shell_file(name):
+  import params
+
+  File(params.ganglia_shell_cmds_dir + os.sep + name,
+       content=StaticFile(name),
+       mode=0755
+  )
+
+
+def ganglia_TemplateConfig(name, mode=0755, tag=None):
+  import params
+
+  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
+                 owner="root",
+                 group="root",
+                 template_tag=tag,
+                 mode=mode
+  )
+
+
+def generate_daemon(ganglia_service,
+                    name=None,
+                    role=None,
+                    owner=None,
+                    group=None):
+  import params
+
+  cmd = ""
+  if ganglia_service == "gmond":
+    if role == "server":
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
+    else:
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
+  elif ganglia_service == "gmetad":
+    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
+  else:
+    raise Fail("Unexpected ganglia service")
+  Execute(format(cmd),
+          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
+                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
+  )
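
For a gmond server role on the HDPNameNode cluster, format() resolves the
command above roughly to the following (the 'hadoop' group is an
illustrative value, not taken from this patch):

    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop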

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py
new file mode 100644
index 0000000..207331b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py
@@ -0,0 +1,140 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_monitor_service
+
+
+class GangliaMonitor(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.configure(env)
+
+  def start(self, env):
+    ganglia_monitor_service.monitor("start")
+
+  def stop(self, env):
+    ganglia_monitor_service.monitor("stop")
+
+
+  def status(self, env):
+    import status_params
+    pid_file_name = 'gmond.pid'
+    pid_file_count = 0
+    pid_dir = status_params.pid_dir
+    # Recursively check all existing gmond pid files
+    for cur_dir, subdirs, files in os.walk(pid_dir):
+      for file_name in files:
+        if file_name == pid_file_name:
+          pid_file = os.path.join(cur_dir, file_name)
+          check_process_status(pid_file)
+          pid_file_count += 1
+    if pid_file_count == 0: # If no pid file is present
+      raise ComponentIsNotRunning()
+
+
+  def configure(self, env):
+    import params
+
+    ganglia.groups_and_users()
+
+    Directory(params.ganglia_conf_dir,
+              owner="root",
+              group=params.user_group,
+              recursive=True
+    )
+
+    ganglia.config()
+
+    if params.is_namenode_master:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jtnode_master:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_rmnode_master:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_master:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    pure_slave = not (params.is_namenode_master or
+                      params.is_jtnode_master or
+                      params.is_rmnode_master or
+                      params.is_hsnode_master or
+                      params.is_hbase_master) and params.is_slave
+    if pure_slave:
+      generate_daemon("gmond",
+                      name = "HDPSlaves",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+
+    Directory(path.join(params.ganglia_dir, "conf.d"),
+              owner="root",
+              group=params.user_group
+    )
+
+    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "gmond.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+if __name__ == "__main__":
+  GangliaMonitor().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor_service.py
new file mode 100644
index 0000000..d86d894
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_monitor_service.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def monitor(action=None):  # 'start' or 'stop'
+  if action == "start":
+    Execute("chkconfig gmond off",
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    )
+  Execute(
+    format(
+      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
+    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server.py
new file mode 100644
index 0000000..7fa747a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server.py
@@ -0,0 +1,181 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_server_service
+
+
+class GangliaServer(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.configure(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/gmetad.pid")
+    # Check the single gmetad pid file
+    check_process_status(pid_file)
+
+  def configure(self, env):
+    import params
+
+    ganglia.groups_and_users()
+    ganglia.config()
+
+    if params.has_namenodes:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_jobtracker:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_masters:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_resourcemanager:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+    if params.has_historyserver:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_slaves:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+    generate_daemon("gmetad",
+                    name = "gmetad",
+                    role = "server",
+                    owner = "root",
+                    group = params.user_group)
+
+    change_permission()
+    server_files()
+    File(path.join(params.ganglia_dir, "gmetad.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+def change_permission():
+  import params
+
+  Directory('/var/lib/ganglia/dwoo',
+            mode=0777,
+            owner=params.gmetad_user,
+            recursive=True
+  )
+
+
+def server_files():
+  import params
+
+  rrd_py_path = params.rrd_py_path
+  Directory(rrd_py_path,
+            recursive=True
+  )
+  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
+  File(rrd_py_file_path,
+       content=StaticFile("rrd.py"),
+       mode=0755
+  )
+  rrd_file_owner = params.gmetad_user
+  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
+    Directory(params.rrdcached_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              mode=0755,
+              recursive=True
+    )
+    Directory(params.rrdcached_default_base_dir,
+              action = "delete"
+    )
+    Link(params.rrdcached_default_base_dir,
+         to=params.rrdcached_base_dir
+    )
+  elif rrd_file_owner != 'nobody':
+    Directory(params.rrdcached_default_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              recursive=True
+    )
+
+
+if __name__ == "__main__":
+  GangliaServer().execute()
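
The delete-then-link sequence in server_files() relocates the RRD store when
a non-default base directory is configured; a rough plain-Python equivalent
(illustrative only -- the Directory and Link resources also handle ownership
and idempotence):

    import os
    import shutil

    def relocate_rrds(default_dir, base_dir):
        if default_dir != base_dir:
            os.makedirs(base_dir)                  # assumes base_dir does not exist yet
            shutil.rmtree(default_dir, ignore_errors=True)
            os.symlink(base_dir, default_dir)      # default path now points at base_dir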

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server_service.py
new file mode 100644
index 0000000..b93e3f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/ganglia_server_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def server(action=None):  # 'start' or 'stop'
+  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  Execute(format(command),
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
new file mode 100644
index 0000000..97ef6bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+
+config = Script.get_config()
+
+user_group = config['configurations']['global']["user_group"]
+ganglia_conf_dir = config['configurations']['global']["ganglia_conf_dir"]
+ganglia_dir = "/etc/ganglia"
+ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
+
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+
+webserver_group = "apache"
+rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
+rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
+
+ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
+
+hostname = config["hostname"]
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+rm_host = default("/clusterHostInfo/rm_host", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+# datanodes are marked as slave_hosts
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
+hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
+flume_hosts = default("/clusterHostInfo/flume_hosts", [])
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+is_tasktracker = hostname in tt_hosts
+is_hbase_rs = hostname in hbase_rs_hosts
+is_flume = hostname in flume_hosts
+
+has_namenodes = len(namenode_host) > 0
+has_jobtracker = len(jtnode_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_historyserver = len(hs_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_slaves = len(slave_hosts) > 0
+has_tasktracker = len(tt_hosts) > 0
+has_hbase_rs = len(hbase_rs_hosts) > 0
+has_flume = len(flume_hosts) > 0
+
+if System.get_instance().os_family == "suse":
+  rrd_py_path = '/srv/www/cgi-bin'
+else:
+  rrd_py_path = '/var/www/cgi-bin'
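
default() above returns the value at a '/'-separated path into the command
JSON when present, and the fallback otherwise; a rough sketch of the
semantics (the real helper ships with resource_management):

    def default(path, fallback):
        node = Script.get_config()
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node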

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
new file mode 100644
index 0000000..3ccad2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaClusters.conf.j2
new file mode 100644
index 0000000..23588a5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaClusters.conf.j2
@@ -0,0 +1,34 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+
+    HDPJournalNode         {{ganglia_server_host}}   8654
+    HDPFlumeServer         {{ganglia_server_host}}   8655
+    HDPHBaseRegionServer   {{ganglia_server_host}}   8656
+    HDPNodeManager         {{ganglia_server_host}}   8657
+    HDPTaskTracker         {{ganglia_server_host}}   8658
+    HDPDataNode            {{ganglia_server_host}}   8659
+    HDPSlaves              {{ganglia_server_host}}   8660
+    HDPNameNode            {{ganglia_server_host}}   8661
+    HDPJobTracker          {{ganglia_server_host}}   8662
+    HDPHBaseMaster         {{ganglia_server_host}}   8663
+    HDPResourceManager     {{ganglia_server_host}}   8664
+    HDPHistoryServer       {{ganglia_server_host}}   8666

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaEnv.sh.j2
new file mode 100644
index 0000000..1ead550
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaEnv.sh.j2
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER={{gmetad_user}};
+GMOND_USER={{gmond_user}};
+WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaLib.sh.j2
new file mode 100644
index 0000000..4b5bdd1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/templates/gangliaLib.sh.j2
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR={{ganglia_conf_dir}};
+GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
+RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}
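
The two awk filters in getGangliaClusterInfo() select non-comment lines,
optionally restricted to one cluster name; the same logic in Python, for
illustration only:

    def ganglia_cluster_info(conf_file, cluster=None):
        with open(conf_file) as f:
            for line in f:
                fields = line.split()
                if not fields or fields[0].startswith('#'):
                    continue   # skip blank and comment lines
                if cluster is None or fields[0] == cluster:
                    yield fields   # [clusterName, gmondMasterHost, gmondPort]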

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-log4j.xml
new file mode 100644
index 0000000..2258d73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-log4j.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hbase.root.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hbase.security.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hbase.log.dir</name>
+    <value>.</value>
+  </property>
+  <property>
+    <name>hbase.log.file</name>
+    <value>hbase.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hbase.root.logger}</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hbase.log.dir}/${hbase.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>hbase.log.maxfilesize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>hbase.log.maxbackupindex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.File</name>
+    <value>${hbase.log.dir}/${hbase.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxFileSize</name>
+    <value>${hbase.log.maxfilesize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxBackupIndex</name>
+    <value>${hbase.log.maxbackupindex}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>hbase.security.log.file</name>
+    <value>SecurityAuth.audit</value>
+  </property>
+  <property>
+    <name>hbase.security.log.maxfilesize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>hbase.security.log.maxbackupindex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.File</name>
+    <value>${hbase.log.dir}/${hbase.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxFileSize</name>
+    <value>${hbase.security.log.maxfilesize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxBackupIndex</name>
+    <value>${hbase.security.log.maxbackupindex}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.category.SecurityLogger</name>
+    <value>${hbase.security.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.SecurityLogger</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.NullAppender</name>
+    <value>org.apache.log4j.varia.NullAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.zookeeper</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hbase</name>
+    <value>DEBUG</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher</name>
+    <value>INFO</value>
+  </property>
+
+
+</configuration>
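
When Ambari renders this configuration type, each <property> pair above
becomes one key=value line in the generated log4j.properties, e.g.
(illustrative):

    log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}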

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
index bda405a..7a7c3d6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
@@ -16,29 +16,109 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.6.1.3.2.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.94.6.1.3.3.0</version>
+      <components>
         <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
+          <name>HBASE_MASTER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
+          <name>HBASE_REGIONSERVER</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_regionserver.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HBASE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hbase-site</config-type>
-      <config-type>hbase-policy</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-log4j</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..39fe6e5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q "$data" /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..e6e7fb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search(r'\d+', heapsize_str).group(0))
+  heapsize_unit = re.search(r'\D+', heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
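
For example, a 1024m heap with a 20% ratio and a 512 cap: floor(1024 * 0.2)
is 204, rounding down to the nearest multiple of 8 gives 200, and 200 is
under the cap, so:

    calc_xmn_from_xms('1024m', 0.2, 512)   # returns '200m'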

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..fddd1b7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+def hbase(type=None # 'master' or 'regionserver' or 'client'
+              ):
+  import params
+  
+  Directory( params.conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+  
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "hdfs-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+  
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+      configurations = params.config['configurations']['hbase-policy'],
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  
+  hbase_TemplateConfig( 'hbase-env.sh')     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
+  
+  if type != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory ( [params.tmp_dir, params.log_dir],
+      owner = params.hbase_user,
+      recursive = True
+    )
+
+  if params.log4j_props is not None:
+    PropertiesFile('log4j.properties',
+                   dir=params.conf_dir,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hbase_user,
+                   group=params.user_group,
+    )
+  elif os.path.exists(format("{params.conf_dir}/log4j.properties")):
+    File(format("{params.conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user
+    )
+
+def hbase_TemplateConfig(name, 
+                         tag=None
+                         ):
+  import params
+
+  TemplateConfig( format("{conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
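
A note on the tag handling above: TemplateConfig renders <name>.j2 when no
tag is given and <name>-<tag>.j2 when one is, so the metrics call picks the
GANGLIA-MASTER or GANGLIA-RS variant of the template. Assuming
metric_prop_file_name is hadoop-metrics.properties, the master case renders
(illustrative):

    hadoop-metrics.properties-GANGLIA-MASTER.j2  ->  {conf_dir}/hadoop-metrics.properties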

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..0f2a1bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+
+         
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    
+    hbase(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/'
+  stdoutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
+  
+  HbaseClient().execute()
+  
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..dba89fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+
+  if params.hbase_drain_only:
+    print "TBD: Remove host from draining"
+  else:
+    kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};") if params.security_enabled else ""
+
+    hosts = params.hbase_excluded_hosts.split(",")
+    for host in hosts:
+      if host:
+        print "TBD: Add host to draining"
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {conf_dir} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
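
The interesting part of hbase_decommission is how the unload command is
assembled per excluded host. The same string-building with the params
module stubbed out by hypothetical values; note why the "if host:" guard
exists, as shown below:

    hbase_cmd = "/usr/lib/hbase/bin/hbase"              # assumed path
    conf_dir = "/etc/hbase/conf"
    region_mover = "/usr/lib/hbase/bin/region_mover.rb" # assumed path
    kinit_cmd = ""                                      # non-secure cluster
    excluded = "host1.example.com,host2.example.com,"   # trailing comma

    for host in excluded.split(","):
        if host:  # skip the empty entry produced by the trailing comma
            cmd = "%s %s --config %s org.jruby.Main %s unload %s" % (
                kinit_cmd, hbase_cmd, conf_dir, region_mover, host)
            print(cmd.strip())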

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..9c78e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+
+
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='master')
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv) > 1 else "install"
+  print "Running " + command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
+  stroutputf = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
+  
+  HbaseMaster().execute()
+  
+if __name__ == "__main__":
+  HbaseMaster().execute()
+  #main()
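
status() above hangs off check_process_status and a pid file. A sketch of
what such a probe has to do, under the assumption that the real
resource_management helper behaves the same way (read the pid, signal 0
to test existence, raise when anything is off):

    import os

    class ComponentIsNotRunning(Exception):
        pass

    def check_process_status(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes existence, sends nothing
        except (IOError, ValueError, OSError):
            # missing/garbled pid file, or no such process
            raise ComponentIsNotRunning()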


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 15c85eb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0
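
A Python equivalent of the deleted df-based check, sketched with
os.statvfs instead of parsing df output (thresholds and exit codes match
the shell script; the percentage math is the usual used/total):

    import os
    import sys

    def used_percent(path):
        st = os.statvfs(path)
        total = st.f_blocks * st.f_frsize
        avail = st.f_bavail * st.f_frsize
        return 100.0 * (total - avail) / total

    def main(dirs, critical):
        for d in dirs.split(","):
            if used_percent(d) >= critical:
                print("CRITICAL: MapReduce local dir is full.")
                return 2
        print("OK: MapReduce local dir space is available.")
        return 0

    if __name__ == "__main__":
        sys.exit(main(sys.argv[1], float(sys.argv[2].rstrip("%"))))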

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to namenode, get the jmx-json document
- * check the NameDirStatuses to find any offline (failed) directories
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file
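
The subtle part of the deleted plugin is that NameDirStatuses is itself a
JSON string embedded inside the JMX JSON document. A sketch of that
double-decode in Python, with the HTTP fetch replaced by made-up sample
data:

    import json

    # Shape of the /jmx reply: the bean value is a JSON *string*.
    inner = json.dumps({"active": {"/hadoop/hdfs/namenode": "IMAGE_AND_EDITS"},
                        "failed": {"/mnt/bad": "FAILED"}})
    reply = {"beans": [{"NameDirStatuses": inner}]}

    statuses = json.loads(reply["beans"][0]["NameDirStatuses"])
    failed = statuses["failed"]
    if failed:
        print("CRITICAL: Offline NameNode directories: " +
              ", ".join("%s:%s" % kv for kv in failed.items()))
    else:
        print("OK: All NameNode directories are active")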

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 50b075a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2
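
The classification logic of the deleted HA check, restated in Python with
the per-NameNode HTTP probe stubbed out by a canned status map (hostnames
are hypothetical); like the shell version, only the first problem found
is reported:

    status_by_nn = {"nn1": "active", "nn2": "standby", "nn3": ""}

    active = [nn for nn, s in status_by_nn.items() if s == "active"]
    standby = [nn for nn, s in status_by_nn.items() if s == "standby"]

    problems = []
    if len(active) > 1:
        problems.append("Only one NN can have HAState=active")
    elif not active:
        problems.append("No Active NN available")
    elif not standby:
        problems.append("No Standby NN available")

    print("CRITICAL: " + "; ".join(problems) if problems
          else "OK: NameNode HA healthy")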

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index 020b41d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
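
The threshold ladder at the end of the deleted plugin reduces to a few
comparisons. A sketch with made-up warn/critical values (the plugin's -w
and -c options, in seconds):

    def rpc_latency_status(queue_avg_secs, warn, crit):
        msg = "RpcQueueTime_avg_time:<%s> Secs" % queue_avg_secs
        if queue_avg_secs >= crit:
            return 2, "CRITICAL: " + msg
        if queue_avg_secs >= warn:
            return 1, "WARNING: " + msg
        return 0, "OK: " + msg

    print(rpc_latency_status(1.7, warn=1, crit=3))  # -> (1, 'WARNING: ...')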

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 7fbc4c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index b23045e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  curl $url -o /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-port=$3
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Function for kinit. If security is enabled and klist shows no cached
- * ticket for this principal, performs the kinit call.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks whether the user already holds a Kerberos ticket
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file
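
kinit_if_needed above is the piece the other deleted plugins all share. A
Python rendering of the same check-then-kinit flow (a sketch only; the
klist availability and the shell pipeline are assumptions):

    import subprocess

    def kinit_if_needed(security_enabled, kinit_path, keytab, principal):
        if not security_enabled:
            return (0, "")
        # A ticket already cached for this principal? Then do nothing.
        probe = subprocess.Popen("klist 2>/dev/null | grep %s" % principal,
                                 shell=True, stdout=subprocess.PIPE)
        if probe.communicate()[0]:
            return (0, "")
        kinit = subprocess.Popen([kinit_path, "-kt", keytab, principal],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        out = kinit.communicate()[0]
        return (kinit.returncode, out)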

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 964225e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management.libraries.script.config_dictionary import UnknownConfiguration
-
-def get_port_from_url(address):
-  if not is_empty(address):
-    return address.split(':')[-1]
-  else:
-    return address
-  
-def is_empty(var):
-  return isinstance(var, UnknownConfiguration)
\ No newline at end of file
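
For reference, what get_port_from_url does to a typical address (the real
helper additionally passes UnknownConfiguration sentinels through
untouched, which is what is_empty guards; the hostname is hypothetical):

    def get_port_from_url(address):
        return address.split(":")[-1]

    print(get_port_from_url("namenode.example.com:50070"))  # -> 50070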

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index 9150995..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-
-  # enable snmpd
-  Execute( "service snmpd start; chkconfig snmpd on",
-    path = "/usr/local/bin/:/bin/:/sbin/"
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permissions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-  
-  
-def set_web_permissions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
-  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
-  Execute( cmd,
-    not_if = test
-  )
-
-  File( "/etc/nagios/htpasswd.users",
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().os_family == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  else:
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute( command)
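
The htpasswd call in set_web_permissions leans on the not_if guard: the
command only runs when the probe command fails, which makes the step
idempotent. A sketch of that pattern with Execute reduced to plain
subprocess calls (file path, user name and hash are hypothetical):

    import subprocess

    def execute(cmd, not_if=None):
        if not_if and subprocess.call(not_if, shell=True) == 0:
            return  # guard succeeded: nothing to do
        subprocess.check_call(cmd, shell=True)

    execute("echo nagiosadmin:HASH >> /tmp/htpasswd.users",
            not_if="grep -q nagiosadmin /tmp/htpasswd.users 2>/dev/null")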

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index 02685c7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    self.configure(env) # re-run configure so configs are refreshed after security is enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-    
-def remove_conflicting_packages():  
-  Package( 'hdp_mon_nagios_addons',
-    action = "remove"
-  )
-
-  Package( 'nagios-plugins',
-    action = "remove"
-  )
-
-  Execute( "rpm -e --allmatches --nopostun nagios",
-    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    ignore_failures = True 
-  )
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv) > 1 else "install"
-  print "Running " + command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
-  stroutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
-  
-  NagiosServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  NagiosServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index 9f6c884..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().os_family != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d/', 
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index cc411b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_service(action='start'): # start or stop
-  import params
-
-  if action == 'start':
-    command = "service nagios start"
-  elif action == 'stop':
-    command = format("service nagios stop && rm -f {nagios_pid_file}")
-
-  Execute( command,
-     path    = "/usr/local/bin/:/bin/:/sbin/"      
-  )
-  MonitorWebserver("restart")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index 870a0db..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from functions import get_port_from_url
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/nagios"
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"
-plugins_dir = "/usr/lib64/nagios/plugins"
-nagios_obj_dir = "/etc/nagios/objects"
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
-hadoop_ssl_enabled = False
-
-namenode_metadata_port = "8020"
-oozie_server_port = "11000"
-# different to HDP2    
-namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.http.address'])
-# different to HDP2  
-snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.secondary.http.address"])
-
-hbase_master_rpc_port = "60000"
-rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
-nm_port = "8042"
-hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
-journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
-flume_port = "4159"
-hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_rs_port = "60030"
-
-# these 4 are different for HDP2
-jtnode_port = get_port_from_url(config['configurations']['mapred-site']['mapred.job.tracker.http.address'])
-jobhistory_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
-tasktracker_port = "50060"
-mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
-
-# this is different for HDP2
-nn_metrics_property = "FSNamesystemMetrics"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
-
-
-java64_home = config['hostLevelParams']['java_home']
-security_enabled = config['configurations']['global']['security_enabled']
-
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().os_family == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-else:
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
-
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-# can differ on HDP2
-_mapred_tt_hosts = _slave_hosts
-# if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    # not in HDP2
-    'tasktracker-servers' : _mapred_tt_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'resourcemanager' : _rm_host,
-    'nodemanagers' : _nm_hosts,
-    'historyserver2' : _hs_host,
-    'journalnodes' : _journalnode_hosts
-}
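
The HA detection near the top of this params module is the usual
safe-lookup-with-fallback pattern. The same logic with default() reduced
to a plain dictionary lookup (configuration values are made up):

    hdfs_site = {"dfs.nameservices": "mycluster",
                 "dfs.ha.namenodes.mycluster": "nn1,nn2"}

    dfs_ha_nameservices = hdfs_site.get("dfs.nameservices")
    ids = hdfs_site.get("dfs.ha.namenodes.%s" % dfs_ha_nameservices)
    dfs_ha_enabled = bool(ids) and len(ids.split(",")) > 1
    print(dfs_ha_enabled)  # -> True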

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 33b35fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 9dada51..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}
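
The {{nagios_web_login}} and {{nagios_contact}} placeholders above are filled in by Jinja2 when the Nagios configuration is written out. A minimal rendering sketch with made-up values (plain jinja2, not the resource_management Template wrapper):

from jinja2 import Template

snippet = Template(
    "define contact{\n"
    "        contact_name    {{nagios_web_login}}\n"
    "        email           {{nagios_contact}}\n"
    "        }\n")
# Values below are placeholders, not real cluster settings.
print(snippet.render(nagios_web_login="nagiosadmin",
                     nagios_contact="admin@example.com"))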

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index 99870d0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,114 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if env.system.os_family != "suse" %}
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
-       }
-{% endif %}
-
-# Check data node storage full 
-define command {
-        command_name    check_datanode_storage
-        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
\ No newline at end of file
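
Nagios expands the $USER1$, $HOSTADDRESS$ and $ARGn$ macros in each command_line at check time; the argument values come from the service definitions, which live in a separate template. A rough Python illustration of that expansion for check_cpu, with placeholder values:

macros = {
    'USER1': '/usr/lib64/nagios/plugins',      # plugin dir; placeholder value
    'HOSTADDRESS': 'c6401.ambari.apache.org',  # placeholder host
    'ARG1': '200%', 'ARG2': '250%',
}
line = '$USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$'
for key, value in macros.items():
    line = line.replace('$%s$' % key, value)
print(line)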

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index d24e5cd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file
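
hostgroup_defs is a dict mapping a role name to its host list, and the if guard skips empty lists. Note the template calls dict.iteritems(), so it must be rendered under Python 2. A small rendering sketch with made-up hosts:

from jinja2 import Template

tmpl = Template(
    "{% for name, hosts in hostgroup_defs.iteritems() %}"
    "{% if hosts %}{{name}}: {{','.join(hosts)}}\n{% endif %}"
    "{% endfor %}")
# Host names are placeholders.
print(tmpl.render(hostgroup_defs={'namenode': ['c6401.ambari.apache.org'],
                                  'slaves': []}))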

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index 778e4f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for host in all_hosts %}
-define host {
-        alias        {{host}}
-        host_name    {{host}}
-        use          linux-server
-        address      {{host}}
-        check_interval         0.25
-        retry_interval         0.25
-        max_check_attempts     4
-        notifications_enabled     1
-        first_notification_delay  0     ; Send notification soon after change in the hard state
-        notification_interval     0     ; Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index 233051f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% if hostgroup_defs['namenode'] or
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-define servicegroup {
-  servicegroup_name  HDFS
-  alias  HDFS Checks
-}
-{% endif %}
-{% if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2'] %}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-{% if hostgroup_defs['resourcemanager'] or
-  hostgroup_defs['nodemanagers'] %}
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-{% endif %}
-{% if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}
-{% if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] %}
-define servicegroup {
-  servicegroup_name  HIVE-METASTORE
-  alias  HIVE-METASTORE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index d6c2a1f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <comment>Provides a REST-like web API for HCatalog and related Hadoop components</comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>WEBHCAT/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
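
The metainfo above declares a single MASTER component with host- and cluster-scoped dependencies plus auto-deploy hints. A sketch of walking that structure with the standard library (Ambari's server has its own metainfo parser; the file path here is hypothetical):

import xml.etree.ElementTree as ET

root = ET.parse('metainfo.xml').getroot()
for component in root.iter('component'):
    name = component.findtext('name')
    deps = [d.findtext('name') for d in component.iter('dependency')]
    print('%s -> %s' % (name, deps))
# For the file above this prints: WEBHCAT_SERVER -> ['HDFS/HDFS_CLIENT',
# 'MAPREDUCE/MAPREDUCE_CLIENT', 'ZOOKEEPER/ZOOKEEPER_SERVER',
# 'ZOOKEEPER/ZOOKEEPER_CLIENT']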

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh
deleted file mode 100644
index cefc4f0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-#try hcat ddl command
-destdir=/tmp
-echo -n "user.name=${smoke_test_user}&exec=show databases;" > ${destdir}/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# Pig smoke test is skipped on secure clusters
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
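
The first check above reduces to an HTTP GET on the Templeton status endpoint that must come back 200 (preceded by a kinit on secure clusters). A minimal unauthenticated Python analogue, with a placeholder host name:

import urllib2

url = 'http://webhcat.example.com:50111/templeton/v1/status'  # host is a placeholder
try:
    code = urllib2.urlopen(url, timeout=10).getcode()
except urllib2.URLError:
    code = None
if code != 200:
    raise SystemExit('Templeton Smoke Test (status cmd): Failed. : %s' % code)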

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py
deleted file mode 100644
index 83211e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-webhcat_user = config['configurations']['global']['webhcat_user']
-download_url = config['configurations']['global']['apache_artifacts_download_url']
-
-config_dir = '/etc/hcatalog/conf'
-
-templeton_log_dir = config['configurations']['global']['hcat_log_dir']
-templeton_pid_dir = status_params.templeton_pid_dir
-
-pid_file = status_params.pid_file
-
-hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-hadoop_home = '/usr'
-user_group = config['configurations']['global']['user_group']
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
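
Every lookup above indexes into the command JSON the Ambari server ships to the agent; status_params pulls from the same structure. A sketch of the minimal shape these lookups assume (all values are placeholders):

config = {
    'configurations': {
        'global': {
            'webhcat_user': 'hcat',
            'hcat_log_dir': '/var/log/webhcat',
            'hcat_pid_dir': '/var/run/webhcat',
        },
        'webhcat-site': {
            'templeton.hadoop.conf.dir': '/etc/hadoop/conf',
        },
    },
    'clusterHostInfo': {
        'webhcat_server_host': ['c6402.ambari.apache.org'],
    },
}
templeton_log_dir = config['configurations']['global']['hcat_log_dir']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']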

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py
deleted file mode 100644
index 58b4d25..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-class WebHCatServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File('/tmp/templetonSmoke.sh',
-         content= StaticFile('templetonSmoke.sh'),
-         mode=0755
-    )
-
-    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-                 " {security_enabled} {kinit_path_local}",
-                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True)
-
-if __name__ == "__main__":
-  WebHCatServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py
deleted file mode 100644
index 21dde6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
-pid_file = format('{templeton_pid_dir}/webhcat.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py
deleted file mode 100644
index ae12f54..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import sys
-
-
-def webhcat():
-  import params
-
-  Directory(params.templeton_pid_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.templeton_log_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.config_dir,
-            owner=params.webhcat_user,
-            group=params.user_group)
-
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['webhcat-site'],
-            owner=params.webhcat_user,
-            group=params.user_group,
-  )
-
-  File(format("{config_dir}/webhcat-env.sh"),
-       owner=params.webhcat_user,
-       group=params.user_group,
-       content=Template('webhcat-env.sh.j2')
-  )
-
-  if params.security_enabled:
-    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-  else:
-    kinit_if_needed = ""
-
-  if kinit_if_needed:
-    Execute(kinit_if_needed,
-            user=params.webhcat_user,
-            path='/bin'
-    )
-
-  copyFromLocal(path='/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
-                kinit_if_needed=kinit_if_needed
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
-  )
-
-
-def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinit_if_needed=""):
-  import params
-
-  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
-  unless_cmd = format("{kinit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
-
-  ExecuteHadoop(copy_cmd,
-                not_if=unless_cmd,
-                user=owner,
-                conf_dir=params.hadoop_conf_dir)
-
-  if not owner:
-    chown = None
-  elif not group:
-    chown = owner
-  else:
-    chown = format('{owner}:{group}')
-
-  if chown:
-    chown_cmd = format("fs -chown {chown} {dest_dir}")
-
-    ExecuteHadoop(chown_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
-
-  if mode:
-    chmod_cmd = format('fs -chmod {mode} {dest_dir}')
-
-    ExecuteHadoop(chmod_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
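
copyFromLocal() relies on the not_if guard for idempotence: the copy runs only when the destination does not yet exist. A hypothetical plain-subprocess analogue of the pig.tar.gz call site (the owner and mode values are placeholders):

import subprocess

dest = '/apps/webhcat/pig.tar.gz'
# Guard: only copy when the destination is missing (mirrors not_if=unless_cmd).
if subprocess.call(['hadoop', 'fs', '-ls', dest]) != 0:
    subprocess.check_call(['hadoop', 'fs', '-copyFromLocal',
                           '/usr/share/HDP-webhcat/pig.tar.gz', dest])
    subprocess.check_call(['hadoop', 'fs', '-chown', 'hcat', dest])
    subprocess.check_call(['hadoop', 'fs', '-chmod', '755', dest])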

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py
deleted file mode 100644
index 4365111..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-
-class WebHCatServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    webhcat()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    webhcat_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    webhcat_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-
-if __name__ == "__main__":
-  WebHCatServer().execute()
\ No newline at end of file
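
status() above delegates to check_process_status(pid_file), which boils down to "the pid file exists and the recorded process is alive". A hypothetical plain-Python version (the real helper raises ComponentIsNotRunning instead of returning a boolean):

import os

def is_process_running(pid_file):
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)   # signal 0 only tests process existence/permission
        return True
    except (IOError, ValueError, OSError):
        return False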

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py
deleted file mode 100644
index 12c3854..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-def webhcat_service(action='start'):
-  import params
-
-  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
-
-  if action == 'start':
-    daemon_cmd = format('{cmd} start')
-    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
-    Execute(daemon_cmd,
-            user=params.webhcat_user,
-            not_if=no_op_test
-    )
-  elif action == 'stop':
-    daemon_cmd = format('{cmd} stop')
-    Execute(daemon_cmd,
-            user=params.webhcat_user
-    )
-    Execute(format('rm -f {pid_file}'))

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2
deleted file mode 100644
index 9ea4a79..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The file containing the running pid
-PID_FILE={{pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>
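
The three timing properties above combine: initLimit and syncLimit are counted in ticks, so with these defaults a follower gets tickTime * initLimit = 20 s to connect and sync at startup and tickTime * syncLimit = 10 s to stay in sync afterwards. The arithmetic:

# Derived timeouts from the defaults above (tickTime is in milliseconds).
tick_time_ms = 2000
init_limit_ticks = 10
sync_limit_ticks = 5
init_timeout_s = tick_time_ms * init_limit_ticks / 1000.0   # 20.0 s
sync_timeout_s = tick_time_ms * sync_limit_ticks / 1000.0   # 10.0 s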

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
deleted file mode 100644
index 8fb48b9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
+++ /dev/null
@@ -1,84 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>log4j.appender.CONSOLE</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.CONSOLE.Threshold</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.appender.CONSOLE.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.CONSOLE.layout.ConversionPattern</name>
-    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.Threshold</name>
-    <value>DEBUG</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.File</name>
-    <value>zookeeper.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.MaxFileSize</name>
-    <value>10MB</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.ROLLINGFILE.layout.ConversionPattern</name>
-    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE</name>
-    <value>org.apache.log4j.FileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.Threshold</name>
-    <value>TRACE</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.File</name>
-    <value>zookeeper_trace.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.TRACEFILE.layout.ConversionPattern</name>
-    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n</value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 07817b1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.5.1.3.3.0</version>
-      <components>
-
-        <component>
-          <name>ZOOKEEPER_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZOOKEEPER_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>zookeeper</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>zookeeper-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh
deleted file mode 100644
index 07017e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh
deleted file mode 100644
index 49ceb4d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this scripted is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   accord to the docs they are not necessary, but otw jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac

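The status branch above is ZooKeeper's four-letter-word protocol ("stat") spoken over a plain TCP socket via nc. For reference, a minimal standalone sketch of the same probe in Python; the host and port are illustrative defaults, not values taken from the stack code:

    import socket

    def zk_mode(host="localhost", port=2181):
        # send the "stat" four-letter word and scan the reply for the Mode line
        sock = socket.create_connection((host, port), timeout=5)
        try:
            sock.sendall(b"stat")
            reply = sock.recv(4096).decode("utf-8", "replace")
        finally:
            sock.close()
        for line in reply.splitlines():
            if line.startswith("Mode:"):
                return line
        return None

    print(zk_mode() or "Error contacting service. It is probably not running.")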
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh
deleted file mode 100644
index 32dfce4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh
deleted file mode 100644
index c1c11b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smoke_script=$1
-smoke_user=$2
-conf_dir=$3
-client_port=$4
-security_enabled=$5
-kinit_path_local=$6
-smoke_user_keytab=$7
-export ZOOKEEPER_EXIT_CODE=0
-test_output_file=/tmp/zkSmoke.out
-errors_expr="ERROR|Exception"
-acceptable_expr="SecurityException"
-zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
-  su - $smoke_user -c "$kinitcmd"
-fi
-
-function verify_output() {
-  if [ -f $test_output_file ]; then
-    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
-    if [ "$?" -eq 0 ]; then
-      echo "Error found in the zookeeper smoke test. Exiting."
-      echo $errors
-      exit 1
-    fi
-  fi
-}
-
-# Delete /zk_smoketest znode if exists
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
-# Create /zk_smoketest znode on one zookeeper server
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
-verify_output
-
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
-  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE

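The smoke test's create / read-from-every-quorum-member / delete cycle can also be expressed against a client library. A minimal sketch assuming the third-party kazoo package; the hostnames are placeholders:

    from kazoo.client import KazooClient

    hosts = ["zk1.example.com", "zk2.example.com", "zk3.example.com"]
    failures = 0

    # create the test znode through the first server
    zk = KazooClient(hosts="%s:2181" % hosts[0])
    zk.start()
    zk.create("/zk_smoketest", b"smoke_data")
    zk.stop()

    # verify the data is visible from every member of the quorum
    for host in hosts:
        zk = KazooClient(hosts="%s:2181" % host)
        zk.start()
        data, _stat = zk.get("/zk_smoketest")
        if data != b"smoke_data":
            failures += 1
        zk.stop()

    # clean up through the first server again
    zk = KazooClient(hosts="%s:2181" % hosts[0])
    zk.start()
    zk.delete("/zk_smoketest")
    zk.stop()

    print("Passed" if failures == 0 else "Failed")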
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py
deleted file mode 100644
index 3c02248..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-config_dir = "/etc/zookeeper/conf"
-zk_user =  config['configurations']['global']['zk_user']
-hostname = config['hostname']
-zk_bin = '/usr/lib/zookeeper/bin'
-user_group = config['configurations']['global']['user_group']
-
-smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-
-zk_log_dir = config['configurations']['global']['zk_log_dir']
-zk_data_dir = config['configurations']['global']['zk_data_dir']
-zk_pid_dir = status_params.zk_pid_dir
-zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize = "-Xmx1024m"
-
-tickTime = config['configurations']['global']['tickTime']
-initLimit = config['configurations']['global']['initLimit']
-syncLimit = config['configurations']['global']['syncLimit']
-clientPort = config['configurations']['global']['clientPort']
-
-if 'zoo.cfg' in config['configurations']:
-  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
-else:
-  zoo_cfg_properties_map = {}
-zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
-
-zk_primary_name = "zookeeper"
-zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
-zk_principal = zk_principal_name.replace('_HOST',hostname)
-
-java64_home = config['hostLevelParams']['java_home']
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-
-keytab_path = "/etc/security/keytabs"
-zk_keytab_path = format("{keytab_path}/zk.service.keytab")
-zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
-zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-security_enabled = config['configurations']['global']['security_enabled']
-
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#log4j.properties
-if ('zookeeper-log4j' in config['configurations']):
-  log4j_props = config['configurations']['zookeeper-log4j']
-else:
-  log4j_props = None

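Note that format() in params.py is the resource_management helper, which resolves {name} placeholders from variables in the calling scope rather than from explicit arguments, so zk_keytab_path picks up the keytab_path defined just above it. A rough standalone analogue of that behaviour, an assumption for illustration rather than the library source:

    import inspect

    def fmt(template, **extra):
        # fill {name} placeholders from the caller's globals/locals,
        # the way resource_management's format() does
        caller = inspect.currentframe().f_back
        names = dict(caller.f_globals)
        names.update(caller.f_locals)
        names.update(extra)
        return template.format(**names)

    keytab_path = "/etc/security/keytabs"
    print(fmt("{keytab_path}/zk.service.keytab"))
    # -> /etc/security/keytabs/zk.service.keytab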
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py
deleted file mode 100644
index 6b3553d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ZookeeperServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File("/tmp/zkSmoke.sh",
-         mode=0755,
-         content=StaticFile('zkSmoke.sh')
-    )
-
-    cmd_quorum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
-                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
-                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd_quorum,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-if __name__ == "__main__":
-  ZookeeperServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py
deleted file mode 100644
index 98f2903..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-zk_pid_dir = config['configurations']['global']['zk_pid_dir']
-zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py
deleted file mode 100644
index 7b74b4c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-
-def zookeeper(type = None):
-  import params
-
-  Directory(params.config_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  configFile("zoo.cfg", template_name="zoo.cfg.j2")
-  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
-  configFile("configuration.xsl", template_name="configuration.xsl.j2")
-
-  Directory(params.zk_pid_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_log_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_data_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  if type == 'server':
-    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
-
-    File(format("{zk_data_dir}/myid"),
-         mode = 0644,
-         content = myid
-    )
-
-  if (params.log4j_props != None):
-    PropertiesFile('log4j.properties',
-      dir=params.config_dir,
-      properties=params.log4j_props,
-      mode=0664,
-      owner=params.zk_user,
-      group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.config_dir}/log4j.properties"))):
-    File(format("{params.config_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.zk_user
-    )
-
-  if params.security_enabled:
-    if type == "server":
-      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-    else:
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-
-  File(format("{config_dir}/zoo_sample.cfg"),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-def configFile(name, template_name=None):
-  import params
-
-  File(format("{config_dir}/{name}"),
-       content=Template(template_name),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-
-

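A worked example of the myid derivation in zookeeper() above. Because params.py sorts zookeeper_hosts in place, these ids line up with the server.N entries the zoo.cfg template emits; the hostnames here are hypothetical:

    zookeeper_hosts = ["c6402.ambari.apache.org",
                       "c6401.ambari.apache.org",
                       "c6403.ambari.apache.org"]
    hostname = "c6402.ambari.apache.org"

    # each server stores the 1-based position of its own hostname in sorted order
    myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
    print(myid)  # "2"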
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py
deleted file mode 100644
index 028a37d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-
-class ZookeeperClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    zookeeper(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py
deleted file mode 100644
index e8cc264..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-from zookeeper_service import zookeeper_service
-
-class ZookeeperServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    zookeeper(type='server')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    zookeeper_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    zookeeper_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.zk_pid_file)
-
-if __name__ == "__main__":
-  ZookeeperServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py
deleted file mode 100644
index 83b8f08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def zookeeper_service(action='start'):
-  import params
-
-  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
-
-  if action == 'start':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
-    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.zk_user
-    )
-  elif action == 'stop':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
-    rm_pid = format("rm -f {zk_pid_file}")
-    Execute(daemon_cmd,
-            user=params.zk_user
-    )
-    Execute(rm_pid)
\ No newline at end of file

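The start branch gets its idempotence from Execute's not_if guard: the daemon command runs only when the pid-file liveness probe fails. A standalone sketch of that pattern; the paths and commands are illustrative:

    import subprocess

    def execute(cmd, not_if=None):
        # skip cmd when the guard command succeeds (exit status 0)
        if not_if and subprocess.call(not_if, shell=True) == 0:
            return
        subprocess.check_call(cmd, shell=True)

    zk_pid_file = "/var/run/zookeeper/zookeeper_server.pid"
    no_op_test = ("ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1"
                  % (zk_pid_file, zk_pid_file))
    execute("echo 'would start zookeeper here'", not_if=no_op_test)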
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2
deleted file mode 100644
index ca498b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2
deleted file mode 100644
index 5b68218..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The number of milliseconds of each tick
-tickTime={{tickTime}}
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit={{initLimit}}
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit={{syncLimit}}
-# the directory where the snapshot is stored.
-dataDir={{zk_data_dir}}
-# the port at which the clients will connect
-clientPort={{clientPort}}
-{% for host in zookeeper_hosts %}
-server.{{loop.index}}={{host}}:2888:3888
-{% endfor %}
-
-{% if security_enabled %}
-authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-jaasLoginRenew=3600000
-kerberos.removeHostFromPrincipal=true
-kerberos.removeRealmFromPrincipal=true
-{% endif %}
-
-{% if zoo_cfg_properties_map_length > 0 %}
-# Custom properties
-{% endif %}
-{% for key, value in zoo_cfg_properties_map.iteritems() %}
-{{key}}={{value}}
-{% endfor %}

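A reduced rendering example for the server.N block above, assuming the third-party jinja2 package (the stack renders these templates through resource_management's Template resource). loop.index is 1-based, which is what makes it match the myid values:

    from jinja2 import Template

    src = ("clientPort={{clientPort}}\n"
           "{% for host in zookeeper_hosts %}"
           "server.{{loop.index}}={{host}}:2888:3888\n"
           "{% endfor %}")

    print(Template(src).render(clientPort=2181,
                               zookeeper_hosts=["zk1.example.com",
                                                "zk2.example.com"]))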
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
deleted file mode 100644
index 493a2a4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-export JAVA_HOME={{java64_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
deleted file mode 100644
index c70449d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
deleted file mode 100644
index 639cdaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{zk_keytab_path}}"
-principal="{{zk_principal}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/files/changeToSecureUid.sh
new file mode 100644
index 0000000..4872a10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/files/changeToSecureUid.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+
+newUid=0
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   # compare against the uid field only; a bare grep would also match gids and home paths
+   cut -d: -f3 /etc/passwd | grep -qx $i
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ $newUid -eq 0 ]
+then
+  echo "Failed to find an unused uid between 1001 and 2000"
+  exit 1
+fi
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
+exit 0

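For comparison, a Python analogue of the free-uid scan (illustrative only; the hook ships the shell script above), using the standard pwd module so that only the numeric uid field is compared:

    import pwd

    def find_available_uid(lo=1001, hi=2000):
        used = set(entry.pw_uid for entry in pwd.getpwall())
        for uid in range(lo, hi + 1):
            if uid not in used:
                return uid
        return 0  # nothing free in the range

    print(find_available_uid())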
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..51e5cd2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+# TODO: make this the "CONFIGURE" hook once the CONFIGURE command is implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_users()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000..dc6d770
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,84 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+zk_user = config['configurations']['global']['zk_user']
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+storm_user = config['configurations']['global']['storm_user']
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group =  "users"
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(nagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_storm_server = not len(storm_server_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file

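default() here reads a /-separated path out of the command JSON and returns the fallback when any segment is missing. A rough standalone analogue; the real helper takes no explicit config argument, so this signature is an assumption for illustration:

    def default(config, path, fallback):
        # walk the nested dict one path segment at a time
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {"clusterHostInfo": {"rm_host": ["rm.example.com"]}}
    print(default(config, "/clusterHostInfo/rm_host", []))  # ['rm.example.com']
    print(default(config, "/clusterHostInfo/hs_host", []))  # []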
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..cf6c2c5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,113 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  Group(params.user_group)
+  Group(params.smoke_user_group)
+  Group(params.proxyuser_group)
+  User(params.smoke_user,
+       gid=params.user_group,
+       groups=[params.proxyuser_group]
+  )
+  smoke_user_dirs = format(
+    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+  set_uid(params.smoke_user, smoke_user_dirs)
+
+  if params.has_hbase_masters:
+    User(params.hbase_user,
+         gid = params.user_group,
+         groups=[params.user_group])
+    hbase_user_dirs = format(
+      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+    set_uid(params.hbase_user, hbase_user_dirs)
+
+  if params.has_nagios:
+    Group(params.nagios_group)
+    User(params.nagios_user,
+         gid=params.nagios_group)
+
+  if params.has_oozie_server:
+    User(params.oozie_user,
+         gid = params.user_group)
+
+  if params.has_hcat_server_host:
+    User(params.webhcat_user,
+         gid = params.user_group)
+    User(params.hcat_user,
+         gid = params.user_group)
+
+  if params.has_hive_server_host:
+    User(params.hive_user,
+         gid = params.user_group)
+
+  if params.has_resourcemanager:
+    User(params.yarn_user,
+         gid = params.user_group)
+
+  if params.has_ganglia_server:
+    Group(params.gmetad_user)
+    Group(params.gmond_user)
+    User(params.gmond_user,
+         gid=params.user_group,
+         groups=[params.gmond_user])
+    User(params.gmetad_user,
+         gid=params.user_group,
+         groups=[params.gmetad_user])
+
+  User(params.hdfs_user,
+        gid=params.user_group,
+        groups=[params.user_group]
+  )
+  User(params.mapred_user,
+       gid=params.user_group,
+       groups=[params.user_group]
+  )
+  if params.has_zk_host:
+    User(params.zk_user,
+         gid=params.user_group)
+
+  if params.has_storm_server:
+    User(params.storm_user,
+         gid=params.user_group,
+         groups=[params.user_group]
+    )
+
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  File("/tmp/changeUid.sh",
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
+          not_if = format("test $(id -u {user}) -gt 1000"))
+
+def install_packages():
+  Package("unzip")
+  Package("net-snmp")
+  Package("net-snmp-utils")
\ No newline at end of file


[04/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_server.py b/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_server.py
deleted file mode 100644
index 3314f10..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_server.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from stacks.utils.RMFTestCase import *
-
-
-class TestGangliaServer(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/GANGLIA/package/scripts/ganglia_server.py",
-                       classname="GangliaServer",
-                       command="configure",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Group', 'hadoop',
-                              )
-    self.assertResourceCalled('Group', 'nobody',
-                              )
-    self.assertResourceCalled('Group', 'nobody',
-                              )
-    self.assertResourceCalled('User', 'nobody',
-                              groups = [u'nobody'],
-                              )
-    self.assertResourceCalled('User', 'nobody',
-                              groups = [u'nobody'],
-                              )
-    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
-                              owner = 'root',
-                              group = 'root',
-                              recursive = True,
-                              )
-    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
-                              content = StaticFile('gmetad.init'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
-                              content = StaticFile('gmond.init'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
-                              content = StaticFile('checkGmond.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
-                              content = StaticFile('checkRrdcached.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
-                              content = StaticFile('gmetadLib.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
-                              content = StaticFile('gmondLib.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
-                              content = StaticFile('rrdcachedLib.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
-                              content = StaticFile('setupGanglia.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
-                              content = StaticFile('startGmetad.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
-                              content = StaticFile('startGmond.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
-                              content = StaticFile('startRrdcached.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
-                              content = StaticFile('stopGmetad.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
-                              content = StaticFile('stopGmond.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
-                              content = StaticFile('stopRrdcached.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
-                              content = StaticFile('teardownGanglia.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
-                              owner = 'root',
-                              template_tag = None,
-                              group = 'root',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
-                              owner = 'root',
-                              template_tag = None,
-                              group = 'root',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
-                              owner = 'root',
-                              template_tag = None,
-                              group = 'root',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHistoryServer -m -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPDataNode -m -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPTaskTracker -m -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseRegionServer -m -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -t -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Directory', '/var/lib/ganglia/dwoo',
-                              owner = 'nobody',
-                              recursive = True,
-                              mode = 0777,
-                              )
-    self.assertResourceCalled('Directory', '/srv/www/cgi-bin',
-                              recursive = True,
-                              )
-    self.assertResourceCalled('File', '/srv/www/cgi-bin/rrd.py',
-                              content = StaticFile('rrd.py'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/etc/ganglia/gmetad.conf',
-                              owner = 'root',
-                              group = 'hadoop',
-                              )
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/GANGLIA/package/scripts/ganglia_server.py",
-                       classname="GangliaServer",
-                       command="start",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Execute', 'service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
-    self.assertResourceCalled('MonitorWebserver', 'restart',
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/GANGLIA/package/scripts/ganglia_server.py",
-                       classname="GangliaServer",
-                       command="stop",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Execute', 'service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
-    self.assertResourceCalled('MonitorWebserver', 'restart',
-                              )
-    self.assertNoMoreResources()
-

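All of the deleted modules in this patch share one RMFTestCase recipe: executeScript runs the component script in-process against a canned JSON config, each assertResourceCalled pops the next resource the script declared and compares its name and keyword arguments, and assertNoMoreResources fails if anything declared was left unasserted. A minimal sketch of that recipe, assembled from the GANGLIA assertions above (only runnable inside the ambari-server test harness; the class name is illustrative):

from stacks.utils.RMFTestCase import *

class TestSketch(RMFTestCase):
  def test_start_default(self):
    # Run the script's "start" command against the canned default.json.
    self.executeScript("1.3.3/services/GANGLIA/package/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="start",
                       config_file="default.json")
    # Pop recorded resources in declaration order; name and kwargs must match.
    self.assertResourceCalled('Execute', 'service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                              )
    self.assertResourceCalled('MonitorWebserver', 'restart',
                              )
    # Fail if the script declared any resource not asserted above.
    self.assertNoMoreResources()
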
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_client.py
deleted file mode 100644
index 7845d97..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_client.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHBaseClient(RMFTestCase):
-  
-  def test_configure_secured(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_client.py",
-                   classname = "HbaseClient",
-                   command = "configure",
-                   config_file="secured.json"
-    )
-    
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
-      owner = 'hbase',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
-      owner = 'hbase',
-      template_tag = 'GANGLIA-RS',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_client_jaas.conf',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertNoMoreResources()
-    
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_client.py",
-                   classname = "HbaseClient",
-                   command = "configure",
-                   config_file="default.json"
-    )
-    
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
-      owner = 'hbase',
-      group = 'hadoop',
-      recursive = True,
-    )    
-    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
-    )    
-    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
-    )    
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-    )    
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
-      owner = 'hbase',
-      template_tag = None,
-    )    
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
-      owner = 'hbase',
-      template_tag = 'GANGLIA-RS',
-    )    
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertNoMoreResources()
-    
-
-    
-

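The hadoop-metrics.properties assertions above differ from the master/regionserver tests only in the template_tag, which selects the Jinja2 source that TemplateConfig renders. The naming rule sketched below is inferred from these assertions rather than taken from this commit:

def template_source(file_name, template_tag=None):
  # template_tag=None renders <file>.j2; a tag renders <file>-<TAG>.j2.
  if template_tag is None:
    return "%s.j2" % file_name
  return "%s-%s.j2" % (file_name, template_tag)

assert template_source('hadoop-metrics.properties') == 'hadoop-metrics.properties.j2'
assert template_source('hadoop-metrics.properties', 'GANGLIA-RS') == \
       'hadoop-metrics.properties-GANGLIA-RS.j2'
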
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_master.py
deleted file mode 100644
index 7e6d67b..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_master.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHBaseMaster(RMFTestCase):
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "configure",
-                   config_file="default.json"
-    )
-    
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-    
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "start",
-                   config_file="default.json"
-    )
-    
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
-      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
-      user = 'hbase'
-    )
-    self.assertNoMoreResources()
-    
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "stop",
-                   config_file="default.json"
-    )
-    
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master && rm -f /var/run/hbase/hbase-hbase-master.pid',
-      not_if = None,
-      user = 'hbase',
-    )
-    self.assertNoMoreResources()
-
-  def test_decom_default(self):
-    self.executeScript("2.1.1/services/HBASE/package/scripts/hbase_master.py",
-                       classname = "HbaseMaster",
-                       command = "decommission",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
-                              logoutput = True,
-                              user = 'hbase',
-                              )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host2',
-                              logoutput = True,
-                              user = 'hbase',
-                              )
-    self.assertNoMoreResources()
-
-  def test_configure_secured(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "configure",
-                   config_file="secured.json"
-    )
-    
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-    
-  def test_start_secured(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "start",
-                   config_file="secured.json"
-    )
-    
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
-      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
-      user = 'hbase',
-    )
-    self.assertNoMoreResources()
-    
-  def test_stop_secured(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_master.py",
-                   classname = "HbaseMaster",
-                   command = "stop",
-                   config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master && rm -f /var/run/hbase/hbase-hbase-master.pid',
-      not_if = None,
-      user = 'hbase',
-    )
-    self.assertNoMoreResources()
-
-  def test_decom_secure(self):
-    self.executeScript("2.1.1/services/HBASE/package/scripts/hbase_master.py",
-                       classname = "HbaseMaster",
-                       command = "decommission",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
-                              logoutput = True,
-                              user = 'hbase',
-                              )
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
-      owner = 'hbase',
-      group = 'hadoop',
-      recursive = True,
-    )   
-    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
-    )  
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
-      owner = 'hbase',
-      template_tag = 'GANGLIA-MASTER',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )   
-    self.assertResourceCalled('Directory', '/hadoop/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )    
-    self.assertResourceCalled('Directory', '/var/log/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
-  
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
-      owner = 'hbase',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
-      owner = 'hbase',
-      template_tag = 'GANGLIA-MASTER',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_master_jaas.conf',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
\ No newline at end of file

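Every start/stop pair above hangs on the same pid-file guard: start is skipped (not_if) while the pid file names a live process, and stop removes the pid file once the daemon exits. A sketch of how those strings compose (function and variable names are illustrative):

def hbase_daemon_cmds(component, pid_dir='/var/run/hbase', user='hbase'):
  # e.g. component='master' -> /var/run/hbase/hbase-hbase-master.pid
  pid_file = '%s/hbase-%s-%s.pid' % (pid_dir, user, component)
  alive_check = ('ls %s >/dev/null 2>&1 && '
                 'ps `cat %s` >/dev/null 2>&1') % (pid_file, pid_file)
  daemon = '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf'
  start_cmd = '%s start %s' % (daemon, component)  # Execute(..., not_if=alive_check)
  stop_cmd = '%s stop %s && rm -f %s' % (daemon, component, pid_file)
  return start_cmd, stop_cmd, alive_check

start_cmd, stop_cmd, alive_check = hbase_daemon_cmds('master')
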
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_regionserver.py
deleted file mode 100644
index 1ad610b..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HBASE/test_hbase_regionserver.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHbaseRegionServer(RMFTestCase):
-  
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "configure",
-                   config_file="default.json"
-    )
-    
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-    
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "start",
-                   config_file="default.json"
-    )
-    
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
-      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
-      user = 'hbase'
-    )
-    self.assertNoMoreResources()
-    
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "stop",
-                   config_file="default.json"
-    )
-    
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver && rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
-      not_if = None,
-      user = 'hbase',
-    )
-    self.assertNoMoreResources()
-    
-  def test_configure_secured(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "configure",
-                   config_file="secured.json"
-    )
-    
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-    
-  def test_start_secured(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "start",
-                   config_file="secured.json"
-    )
-    
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
-      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
-      user = 'hbase',
-    )
-    self.assertNoMoreResources()
-    
-  def test_stop_secured(self):
-    self.executeScript("1.3.3/services/HBASE/package/scripts/hbase_regionserver.py",
-                   classname = "HbaseRegionServer",
-                   command = "stop",
-                   config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver && rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
-      not_if = None,
-      user = 'hbase',
-    )
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
-      owner = 'hbase',
-      group = 'hadoop',
-      recursive = True,
-    )   
-    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
-    )  
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
-      owner = 'hbase',
-      template_tag = 'GANGLIA-RS',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )   
-    self.assertResourceCalled('Directory', '/hadoop/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )    
-    self.assertResourceCalled('Directory', '/var/log/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
-  
-  
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/etc/hbase/conf',
-      owner = 'hbase',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-      conf_dir = '/etc/hbase/conf',
-      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties      
-    )
-    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
-      owner = 'hbase',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
-      owner = 'hbase',
-      template_tag = 'GANGLIA-RS',
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_regionserver_jaas.conf',
-      owner = 'hbase',
-      template_tag = None,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hbase',
-      owner = 'hbase',
-      recursive = True,
-    )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hcat_client.py b/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hcat_client.py
deleted file mode 100644
index 5a935aa..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hcat_client.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHcatClient(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hcat_client.py",
-                       classname = "HCatClient",
-                       command = "configure",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('Directory', '/var/run/webhcat',
-      owner = 'hcat',
-      recursive = True,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hcatalog/conf/hcat-env.sh',
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertNoMoreResources()
-
-
-
-  def test_configure_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hcat_client.py",
-                         classname = "HCatClient",
-                         command = "configure",
-                         config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('Directory', '/var/run/webhcat',
-      owner = 'hcat',
-      recursive = True,
-    )
-    self.assertResourceCalled('TemplateConfig', '/etc/hcatalog/conf/hcat-env.sh',
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertNoMoreResources()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_client.py b/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_client.py
deleted file mode 100644
index 56eff70..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_client.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHiveClient(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_client.py",
-                       classname = "HiveClient",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Directory', '/etc/hive/conf',
-      owner = 'hive',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 420,
-      conf_dir = '/etc/hive/conf',
-      configurations = self.getConfig()['configurations']['hive-site'],
-    )
-    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
-      not_if = '[ -f DBConnectionVerification.jar ]',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh',
-      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf"),
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertNoMoreResources()
-
-
-
-  def test_configure_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_client.py",
-                       classname = "HiveClient",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-    self.assertResourceCalled('Directory', '/etc/hive/conf',
-      owner = 'hive',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 420,
-      conf_dir = '/etc/hive/conf',
-      configurations = self.getConfig()['configurations']['hive-site'],
-    )
-    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
-      not_if = '[ -f DBConnectionVerification.jar ]',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh',
-      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf"),
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertNoMoreResources()
\ No newline at end of file

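Both HIVE client configs above download DBConnectionVerification.jar through a guarded curl. Note that the guard tests a bare relative path, so it only short-circuits when the agent's working directory already holds the jar; the cd inside the command does not affect the not_if check. A sketch using an absolute guard path (the Execute import path reflects the agent library layout and is an assumption here):

from resource_management.core.resources.system import Execute

jar = '/usr/lib/ambari-agent/DBConnectionVerification.jar'
cmd = ("/bin/sh -c 'cd /usr/lib/ambari-agent/ && "
       "curl -kf --retry 5 "
       "http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar "
       "-o DBConnectionVerification.jar'")
# Absolute path keeps the guard independent of the agent's cwd.
Execute(cmd, not_if='[ -f %s ]' % jar)  # declared inside a script's Environment
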
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_metastore.py
deleted file mode 100644
index d43cf4b..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_metastore.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHiveMetastore(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "start",
-                       config_file="default.json"
-    )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server',
-                              not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
-                              user = 'hive'
-    )
-
-    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
-                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
-    )
-
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "stop",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive.pid')
-    self.assertNoMoreResources()
-
-  def test_configure_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "start",
-                       config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server',
-                              not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
-                              user = 'hive'
-    )
-
-    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
-                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
-    )
-
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_metastore.py",
-                       classname = "HiveMetastore",
-                       command = "stop",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive.pid')
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
-      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
-      path = ['/bin', '/usr/bin/'],
-      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
-    )
-    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
-      owner = 'hive',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 384,
-      conf_dir = '/etc/hive/conf.server',
-      configurations = self.getConfig()['configurations']['hive-site'],
-    )
-    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
-      not_if = '[ -f DBConnectionVerification.jar ]',
-    )
-    self.assertResourceCalled('File', '/tmp/start_metastore_script',
-      content = StaticFile('startMetastore.sh'),
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/lib/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
-      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
-      path = ['/bin', '/usr/bin/'],
-      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
-    )
-    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
-      owner = 'hive',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 384,
-      conf_dir = '/etc/hive/conf.server',
-      configurations = self.getConfig()['configurations']['hive-site'],
-    )
-    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
-      not_if = '[ -f DBConnectionVerification.jar ]',
-    )
-    self.assertResourceCalled('File', '/tmp/start_metastore_script',
-      content = StaticFile('startMetastore.sh'),
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/lib/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-

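The bare decimal mode values asserted throughout these HIVE tests are the same octal permissions the GANGLIA tests spell as literals; only the base differs:

# Decimal modes above, in the usual octal spelling:
assert 493 == 0o755  # rwxr-xr-x: start scripts, /var/run/hive, /var/log/hive, /var/lib/hive
assert 420 == 0o644  # rw-r--r--: hive-site.xml in the client conf dir
assert 384 == 0o600  # rw-------: hive-site.xml in /etc/hive/conf.server
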
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_server.py
deleted file mode 100644
index 44effe7..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_server.py
+++ /dev/null
@@ -1,215 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHiveServer(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-  
-  def test_start_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_server.py",
-                         classname = "HiveServer",
-                         command = "start",
-                         config_file="default.json"
-    )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server',
-                              not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
-                              user = 'hive'
-    )
-
-    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
-                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
-    )
-
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "stop",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive-server.pid')
-    self.assertNoMoreResources()
-
-    
-  def test_configure_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "start",
-                       config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server',
-                              not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
-                              user = 'hive'
-    )
-
-    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
-                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
-    )
-
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/hive_server.py",
-                       classname = "HiveServer",
-                       command = "stop",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive-server.pid')
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
-      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
-      path = ['/bin', '/usr/bin/'],
-      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
-    )
-    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
-      owner = 'hive',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 384,
-      conf_dir = '/etc/hive/conf.server',
-      configurations = self.getConfig()['configurations']['hive-site'],
-    )
-    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
-      not_if = '[ -f DBConnectionVerification.jar ]',
-    )
-    self.assertResourceCalled('File', '/tmp/start_hiveserver2_script',
-      content = StaticFile('startHiveserver2.sh'),
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/lib/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
-      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
-      path = ['/bin', '/usr/bin/'],
-      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
-    )
-    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
-      owner = 'hive',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 384,
-      conf_dir = '/etc/hive/conf.server',
-      configurations = self.getConfig()['configurations']['hive-site'],
-    )
-    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
-      not_if = '[ -f DBConnectionVerification.jar ]',
-    )
-    self.assertResourceCalled('File', '/tmp/start_hiveserver2_script',
-      content = StaticFile('startHiveserver2.sh'),
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/var/run/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/lib/hive',
-      owner = 'hive',
-      group = 'hadoop',
-      mode = 493,
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
-      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
-      owner = 'hive',
-      group = 'hadoop',
-    )
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_service_check.py b/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_service_check.py
deleted file mode 100644
index c5af2a6..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_hive_service_check.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-import datetime
-import resource_management.libraries.functions
-@patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
-class TestServiceCheck(RMFTestCase):
-
-  def test_service_check_default(self):
-
-    self.executeScript("1.3.3/services/HIVE/package/scripts/service_check.py",
-                        classname="HiveServiceCheck",
-                        command="service_check",
-                        config_file="default.json"
-    )
-    self.assertResourceCalled('File', '/tmp/hiveserver2Smoke.sh',
-                        content = StaticFile('hiveserver2Smoke.sh'),
-                        mode = 493,
-    )
-    self.assertResourceCalled('File', '/tmp/hiveserver2.sql',
-                        content = StaticFile('hiveserver2.sql'),
-    )
-    self.assertResourceCalled('Execute', "env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hiveserver2Smoke.sh jdbc:hive2://c6402.ambari.apache.org:10000 /tmp/hiveserver2.sql",
-                        logoutput = True,
-                        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                        tries = 3,
-                        user = 'ambari-qa',
-                        try_sleep = 5,
-    )
-    self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
-                        content = StaticFile('hcatSmoke.sh'),
-                        mode = 493,
-    )
-    self.assertResourceCalled('Execute', 'sh /tmp/hcatSmoke.sh hcatsmoke prepare',
-                        logoutput = True,
-                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-                        tries = 3,
-                        user = 'ambari-qa',
-                        try_sleep = 5,
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
-                        logoutput = True,
-                        user = 'hdfs',
-                        conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('Execute', 'sh /tmp/hcatSmoke.sh hcatsmoke cleanup',
-                        logoutput = True,
-                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-                        tries = 3,
-                        user = 'ambari-qa',
-                        try_sleep = 5,
-    )
-    self.assertNoMoreResources()
-
-  def test_service_check_secured(self):
-
-    self.executeScript("1.3.3/services/HIVE/package/scripts/service_check.py",
-                        classname="HiveServiceCheck",
-                        command="service_check",
-                        config_file="secured.json"
-    )
-    self.assertResourceCalled('File', '/tmp/hiveserver2Smoke.sh',
-                        content = StaticFile('hiveserver2Smoke.sh'),
-                        mode = 493,
-    )
-    self.assertResourceCalled('File', '/tmp/hiveserver2.sql',
-                        content = StaticFile('hiveserver2.sql'),
-    )
-    self.assertResourceCalled('Execute', "/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hiveserver2Smoke.sh jdbc:hive2://c6402.ambari.apache.org:10000/\\;principal=/etc/security/keytabs/hive.service.keytab /tmp/hiveserver2.sql",
-                        logoutput = True,
-                        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                        tries = 3,
-                        user = 'ambari-qa',
-                        try_sleep = 5,
-    )
-    self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
-                        content = StaticFile('hcatSmoke.sh'),
-                        mode = 493,
-    )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; sh /tmp/hcatSmoke.sh hcatsmoke prepare',
-                        logoutput = True,
-                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-                        tries = 3,
-                        user = 'ambari-qa',
-                        try_sleep = 5,
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
-                        logoutput = True,
-                        user = 'hdfs',
-                        conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; sh /tmp/hcatSmoke.sh hcatsmoke cleanup',
-                        logoutput = True,
-                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-                        tries = 3,
-                        user = 'ambari-qa',
-                        try_sleep = 5,
-    )
-    self.assertNoMoreResources()

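For readers new to these tests: RMFTestCase.executeScript runs the stack script against the given JSON config with resources mocked, recording every resource the script declares in order; assertResourceCalled then consumes that queue and assertNoMoreResources checks it is empty. A minimal Python sketch of that record-and-assert idea (illustrative only; the real harness in stacks/utils/RMFTestCase differs in detail):

    # Toy stand-in for the RMFTestCase bookkeeping, not the real class.
    class ResourceRecorder(object):
        def __init__(self):
            self.calls = []  # (resource_type, name, kwargs), in declaration order

        def record(self, resource_type, name, **kwargs):
            self.calls.append((resource_type, name, kwargs))

        def assertResourceCalled(self, resource_type, name, **kwargs):
            expected = (resource_type, name, kwargs)
            actual = self.calls.pop(0)  # order-sensitive, like the tests above
            assert actual == expected, "expected %r, got %r" % (expected, actual)

        def assertNoMoreResources(self):
            assert not self.calls, "unasserted resources: %r" % self.calls

    rec = ResourceRecorder()
    rec.record('Execute', 'service mysql start', logoutput=True, tries=1)
    rec.assertResourceCalled('Execute', 'service mysql start', logoutput=True, tries=1)
    rec.assertNoMoreResources()
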
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_mysql_server.py b/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_mysql_server.py
deleted file mode 100644
index f79b954..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/HIVE/test_mysql_server.py
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestMySqlServer(RMFTestCase):
-  def test_configure_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/mysql_server.py",
-                       classname = "MysqlServer",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/mysql_server.py",
-                       classname = "MysqlServer",
-                       command = "start",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'service mysql start',
-                       logoutput = True,
-                       path = ['/usr/local/bin/:/bin/:/sbin/'],
-                       tries = 1,
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/mysql_server.py",
-                       classname = "MysqlServer",
-                       command = "stop",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'service mysql stop',
-                              logoutput = True,
-                              path = ['/usr/local/bin/:/bin/:/sbin/'],
-                              tries = 1,
-    )
-    self.assertNoMoreResources()
-
-
-  def test_configure_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/mysql_server.py",
-                       classname = "MysqlServer",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/mysql_server.py",
-                       classname = "MysqlServer",
-                       command = "start",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'service mysql start',
-                              logoutput = True,
-                              path = ['/usr/local/bin/:/bin/:/sbin/'],
-                              tries = 1,
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("2.1.1/services/HIVE/package/scripts/mysql_server.py",
-                       classname = "MysqlServer",
-                       command = "stop",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'service mysql stop',
-                              logoutput = True,
-                              path = ['/usr/local/bin/:/bin/:/sbin/'],
-                              tries = 1,
-                              )
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Execute', 'service mysql start',
-      logoutput = True,
-      path = ['/usr/local/bin/:/bin/:/sbin/'],
-      tries = 1,
-    )
-    self.assertResourceCalled('File', '/tmp/addMysqlUser.sh',
-      content = StaticFile('addMysqlUser.sh'),
-      mode = 493,
-    )
-    self.assertResourceCalled('Execute', ('bash', '-x', '/tmp/addMysqlUser.sh', 'mysql', u'hive', 'asd', u'c6402.ambari.apache.org'),
-      logoutput = True,
-      path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-      tries = 3,
-      try_sleep = 5,
-    )
-    self.assertResourceCalled('Execute', 'service mysql stop',
-      logoutput = True,
-      path = ['/usr/local/bin/:/bin/:/sbin/'],
-      tries = 1,
-    )
-
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Execute', 'service mysql start',
-      logoutput = True,
-      path = ['/usr/local/bin/:/bin/:/sbin/'],
-      tries = 1,
-    )
-    self.assertResourceCalled('File', '/tmp/addMysqlUser.sh',
-      content = StaticFile('addMysqlUser.sh'),
-      mode = 493,
-    )
-    self.assertResourceCalled('Execute', ('bash', '-x', '/tmp/addMysqlUser.sh', 'mysql', u'hive', 'asd', u'c6402.ambari.apache.org'),
-      logoutput = True,
-      path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-      tries = 3,
-      try_sleep = 5,
-    )
-    self.assertResourceCalled('Execute', 'service mysql stop',
-      logoutput = True,
-      path = ['/usr/local/bin/:/bin/:/sbin/'],
-      tries = 1,
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_client.py b/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_client.py
deleted file mode 100644
index 632ae49..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_client.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestMapreduceClient(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/client.py",
-                       classname = "Client",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertNoMoreResources()
-
-  def test_configure_secured(self):
-
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/client.py",
-      classname = "Client",
-      command = "configure",
-      config_file="secured.json"
-    )
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-
-    self.assertNoMoreResources()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_historyserver.py b/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_historyserver.py
deleted file mode 100644
index 51735ce..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_historyserver.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestHistoryServer(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/historyserver.py",
-                       classname = "Historyserver",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/historyserver.py",
-                         classname = "Historyserver",
-                         command = "start",
-                         config_file="default.json"
-      )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver',
-                              user = 'mapred',
-                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
-    )
-    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1',
-                              user = 'mapred',
-                              initial_wait = 5,
-                              not_if= 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/historyserver.py",
-                       classname = "Historyserver",
-                       command = "stop",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop historyserver',
-                              user = 'mapred'
-    )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid')
-    self.assertNoMoreResources()
-
-
-  def test_configure_secured(self):
-
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/historyserver.py",
-                       classname = "Historyserver",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/historyserver.py",
-                         classname = "Historyserver",
-                         command = "start",
-                         config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver',
-                              user = 'mapred',
-                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
-    )
-    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1',
-                              user = 'mapred',
-                              initial_wait = 5,
-                              not_if= 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/historyserver.py",
-                       classname = "Historyserver",
-                       command = "stop",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop historyserver',
-                              user = 'mapred'
-    )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid')
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index 020b41d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;

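The check above boils down to: kinit when secured, then fetch the NodeManager's /ws/v1/node/info REST document and look for "nodeHealthy":true. A rough Python equivalent of the unsecured path (a hypothetical port for illustration, assuming the usual {"nodeInfo": {"nodeHealthy": ...}} response shape; kinit and Negotiate auth omitted):

    import json
    import sys
    import urllib2  # Python 2, matching the era of these scripts

    def check_nodemanager_health(host, port):
        url = "http://%s:%s/ws/v1/node/info" % (host, port)
        try:
            info = json.load(urllib2.urlopen(url, timeout=10))
        except Exception as e:
            print("CRITICAL: NodeManager unreachable [%s]" % e)
            return 2
        if info.get("nodeInfo", {}).get("nodeHealthy"):
            print("OK: NodeManager healthy")
            return 0
        print("CRITICAL: NodeManager unhealthy")
        return 2

    if __name__ == "__main__":
        sys.exit(check_nodemanager_health(sys.argv[1], sys.argv[2]))
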
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($retcode != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

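Stripped of kinit, SSL, and the 401 retry, the plugin above reduces to one JMX query and two threshold comparisons. A hedged Python sketch of that core (same bean and attribute names as the PHP; error handling kept minimal):

    import json
    import urllib2

    def check_rpcq_latency(host, port, master, warn, crit):
        # Same JMX query the PHP plugin issues against the master's web port.
        url = ("http://%s:%s/jmx?qry=Hadoop:service=%s,name=RpcActivityForPort*"
               % (host, port, master))
        beans = json.load(urllib2.urlopen(url, timeout=10)).get("beans", [])
        if not beans:
            return 2, "CRITICAL: Data inaccessible"
        queue_time = round(beans[0].get("RpcQueueTime_avg_time", 0.0), 2)
        msg = "RpcQueueTime_avg_time:<%s> Secs" % queue_time
        if queue_time >= crit:
            return 2, "CRITICAL: " + msg
        if queue_time >= warn:
            return 1, "WARNING: " + msg
        return 0, "OK: " + msg
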
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 7fbc4c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index b23045e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  curl $url -o /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-port=$3
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Function for kinit. If security is enabled and klist shows no ticket for
- * this principal, performs a kinit call.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks if user is logined on kerberos
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 964225e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management.libraries.script.config_dictionary import UnknownConfiguration
-
-def get_port_from_url(address):
-  if not is_empty(address):
-    return address.split(':')[-1]
-  else:
-    return address
-  
-def is_empty(var):
-  return isinstance(var, UnknownConfiguration)
\ No newline at end of file

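get_port_from_url above simply takes everything after the last colon, passing UnknownConfiguration values through untouched. For a plain host:port string:

    >>> get_port_from_url('c6401.ambari.apache.org:50070')
    '50070'
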
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index 9150995..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-
-  # enable snmpd
-  Execute( "service snmpd start; chkconfig snmpd on",
-    path = "/usr/local/bin/:/bin/:/sbin/"
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permissions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-  
-  
-def set_web_permissions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
-  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
-  Execute( cmd,
-    not_if = test
-  )
-
-  File( "/etc/nagios/htpasswd.users",
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().os_family == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  else:
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute( command)

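Worth noting in the script above: Execute with a not_if guard is what makes repeated configure runs idempotent; the htpasswd command is skipped once the login already appears in the file. Conceptually (a simplified stand-in, not the real resource_management Execute):

    import subprocess

    def execute(cmd, not_if=None):
        # Skip cmd when the guard command succeeds: the change is already applied.
        if not_if and subprocess.call(not_if, shell=True) == 0:
            return
        subprocess.check_call(cmd, shell=True)

    # Second call is a no-op because the guard now succeeds.
    execute("touch /tmp/example.flag", not_if="test -f /tmp/example.flag")
    execute("touch /tmp/example.flag", not_if="test -f /tmp/example.flag")
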
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index 02685c7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    self.configure(env) # done for updating configs after Security enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-    
-def remove_conflicting_packages():  
-  Package( 'hdp_mon_nagios_addons',
-    action = "remove"
-  )
-
-  Package( 'nagios-plugins',
-    action = "remove"
-  )
-
-  Execute( "rpm -e --allmatches --nopostun nagios",
-    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    ignore_failures = True 
-  )
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
-  stroutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
-  
-  NagiosServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  NagiosServer().execute()

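NagiosServer above leans on Script.execute() for dispatch: the agent invokes the script with the command name as the first argument, and execute() calls the method of the same name. A stripped-down sketch of that dispatch (the real Script also loads the command JSON, configures logging, and builds the environment object):

    import sys

    class Script(object):
        def execute(self):
            command = sys.argv[1]  # e.g. "install", "start", "status"
            getattr(self, command)(env=None)

    class NagiosServerSketch(Script):
        def start(self, env):
            print("starting nagios")

    if __name__ == "__main__":
        NagiosServerSketch().execute()  # e.g. `python this_script.py start`
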
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index 9f6c884..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().os_family != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d/', 
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index cc411b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_service(action='start'): # start or stop
-  import params
-
-  if action == 'start':
-    command = "service nagios start"
-  elif action == 'stop':
-    command = format("service nagios stop && rm -f {nagios_pid_file}")
-
-  Execute( command,
-     path    = "/usr/local/bin/:/bin/:/sbin/"      
-  )
-  MonitorWebserver("restart")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index 4edae8c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from functions import get_port_from_url
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/nagios"
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"
-plugins_dir = "/usr/lib64/nagios/plugins"
-nagios_obj_dir = "/etc/nagios/objects"
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
-hadoop_ssl_enabled = False
-
-namenode_metadata_port = "8020"
-oozie_server_port = "11000"
-# differs from HDP1
-namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
-# differs from HDP1
-snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.namenode.secondary.http-address"])
-
-hbase_master_rpc_port = "60000"
-rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
-nm_port = "8042"
-hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'])
-journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
-flume_port = "4159"
-hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_rs_port = "60030"
-
-# this is different for HDP1
-nn_metrics_property = "FSNamesystem"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
-
-
-java64_home = config['hostLevelParams']['java_home']
-security_enabled = config['configurations']['global']['security_enabled']
-
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().os_family == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-else:
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
-
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-# can differ on HDP1
-#_mapred_tt_hosts = _slave_hosts
-#if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    # HDP1
-    #'tasktracker-servers' : _mapred_tt_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'resourcemanager' : _rm_host,
-    'nodemanagers' : _nm_hosts,
-    'historyserver2' : _hs_host,
-    'journalnodes' : _journalnode_hosts
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 33b35fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 9dada51..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index 99870d0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,114 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if env.system.os_family != "suse" %}
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
-       }
-{% endif %}
-
-# Check data node storage full 
-define command {
-        command_name    check_datanode_storage
-        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index 2bcbf7c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index 62555d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-{% for host in all_hosts %}
-define host {
-        alias        {{host}}
-        host_name    {{host}}
-        use          linux-server
-        address      {{host}}
-        check_interval         0.25
-        retry_interval         0.25
-        max_check_attempts     4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index 0101ce6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,80 +0,0 @@
-{% if hostgroup_defs['namenode'] or
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-define servicegroup {
-  servicegroup_name  HDFS
-  alias  HDFS Checks
-}
-{% endif %}
-{%if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2']-%}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-{%if hostgroup_defs['resourcemanager'] or
-  hostgroup_defs['nodemanagers'] %}
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-{% endif %}
-{%if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}
-{%if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] %}
-define servicegroup {
-  servicegroup_name  HIVE-METASTORE
-  alias  HIVE-METASTORE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index b9f0892..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,643 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{# used only for HDP2 #}
-{% if hostgroup_defs['namenode'] and dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::NameNode HA Healthy
-        servicegroups           HDFS
-        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      5
-}
-{% endif %}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name	        {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['jobtracker'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for JobTracker
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% endif %}
-
-{% if hostgroup_defs['snamenode'] %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['namenode'] %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if env.system.os_family != "suse" %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   10
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-{% endif %}
-
-# MAPREDUCE Checks
-{# On HDP1, jobtracker and tasktracker alerts go here #}
-
-{% if hostgroup_defs['resourcemanager'] %}
-# YARN::RESOURCEMANAGER Checks 
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui!resourcemanager!{{ rm_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.os_family != "suse" %}
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['nodemanagers'] %}
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!historyserver2!{{ hs_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.os_family != "suse" %}
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobHistoryServer!{{ hs_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{%  endif %}
-
-{% if hostgroup_defs['journalnodes'] %}
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['slaves'] %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{# HBASE:: MASTER Checks
-# define service {
-#         hostgroup_name          hbasemasters
-#         use                     hadoop-service
-#         service_description     HBASEMASTER::HBase Master Web UI
-#         servicegroups           HBASE
-#         check_command           check_webui!hbase!{{ hbase_master_port }}
-#         normal_check_interval   1
-#         retry_check_interval    1
-#         max_check_attempts      3
-# #}
-{%  for hbasemaster in hbase_master_hosts  %}
-{% if env.system.os_family != "suse" %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore status
-        servicegroups           HIVE-METASTORE
-        {% if security_enabled %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..5cd264b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,106 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = get_unique_id_and_date()
+    dir = '/tmp'
+    tmp_file = format("{dir}/{unique}")
+
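+    # the service check can only proceed once the NameNode reports safemode is OFF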
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
+    test_dir_exists = format("hadoop fs -test -e {dir}")
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup runs before the put to handle retries; a retry would otherwise hit a
+    #stale file; the exit code is that of the second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
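+    # on a secure cluster, kinit as the smoke user before touching HDFS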
+    if params.security_enabled:
+      Execute(format(
+        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "{smoke_user}'"))
+    ExecuteHadoop(safemode_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=15,
+                  tries=20
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
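+    # when JournalNodes are present, probe each one's web UI with the bundled checkWebUI.py helper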
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      smoke_test_user = params.smoke_user
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format(
+        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName))
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+    if params.has_zkfc_hosts:
+      pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+      pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+      check_zkfc_process_cmd = format(
+        "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+      Execute(check_zkfc_process_cmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..b2a3bd1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_snamenode import snamenode
+
+
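+# each lifecycle command delegates to the shared snamenode() helper from hdfs_snamenode.py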
+class SNameNode(Script):
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.configure(env)
+    snamenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
new file mode 100644
index 0000000..4097373
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
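+# pid file locations consumed by the per-component status checks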
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['global']['hdfs_user']
+hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py
new file mode 100644
index 0000000..a67d3b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py
@@ -0,0 +1,133 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def service(action=None, name=None, user=None, create_pid_dir=False,
+            create_log_dir=False, keytab=None, principal=None):
+  import params
+
+  kinit_cmd = "true"
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+  hadoop_daemon = format(
+    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  if create_pid_dir:
+    Directory(pid_dir,
+              owner=user,
+              recursive=True)
+  if create_log_dir:
+    Directory(log_dir,
+              owner=user,
+              recursive=True)
+
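+  # on a secure cluster kinit first; a secure DataNode is started as root (via jsvc)
+  # while its pid file stays under the hdfs user's directory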
+  if params.security_enabled:
+    principal_replaced = principal.replace("_HOST", params.hostname)
+    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
+
+    if name == "datanode":
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+  daemon_cmd = format("{cmd} {action} {name}")
+
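+  # skip "start" if a live process already matches the pid file; other actions always run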
+  service_is_up = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
+
+  Execute(kinit_cmd)
+  Execute(daemon_cmd,
+          user = user,
+          not_if=service_is_up
+  )
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+         ignore_failures=True
+    )
+
+
+def hdfs_directory(name=None, owner=None, group=None,
+                   mode=None, recursive_chown=False, recursive_chmod=False):
+  import params
+
+  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
+  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
+
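+  # a local stub file records HDFS directories already created, so reruns can skip the remote checks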
+  stub_dir = params.namenode_dirs_created_stub_dir
+  stub_filename = params.namenode_dirs_stub_filename
+  dir_absent_in_stub = format(
+    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
+  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
+  tries = 30
+  try_sleep = 10
+  dfs_check_nn_status_cmd = "true"
+
+  #if params.stack_version[0] == "2":
+  #mkdir_cmd = format("fs -mkdir -p {name}")
+  #else:
+  mkdir_cmd = format("fs -mkdir {name}")
+
+  if params.security_enabled:
+    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
+            user = params.hdfs_user)
+  ExecuteHadoop(mkdir_cmd,
+                try_sleep=try_sleep,
+                tries=tries,
+                not_if=format(
+                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "{dir_exists} && ! {namenode_safe_mode_off}"),
+                only_if=format(
+                  "su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "! {dir_exists}'"),
+                conf_dir=params.hadoop_conf_dir,
+                user=params.hdfs_user
+  )
+  Execute(record_dir_in_stub,
+          user=params.hdfs_user,
+          only_if=format("! {dir_absent_in_stub}")
+  )
+
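+  # chown/chmod run only when the NameNode is out of safemode and the directory exists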
+  recursive = "-R" if recursive_chown else ""
+  perm_cmds = []
+
+  if owner:
+    chown = owner
+    if group:
+      chown = format("{owner}:{group}")
+    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
+  if mode:
+    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
+  for cmd in perm_cmds:
+    ExecuteHadoop(cmd,
+                  user=params.hdfs_user,
+                  only_if=format("su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}'"),
+                  try_sleep=try_sleep,
+                  tries=tries,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

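The template above expands to one excluded host per line. A standalone sketch
of the equivalent rendering with the jinja2 package (host names are made up):

    from jinja2 import Template

    tmpl = Template("{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}")
    print tmpl.render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"])
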
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
index d9adc80..ae7f586 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
@@ -121,5 +121,28 @@
     <value>hive</value>
     <description>Hive User.</description>
   </property>
+
+  <!--HCAT-->
+
+  <property>
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <value>/var/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <description>WebHCat User.</description>
+  </property>
   
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-exec-log4j.xml
new file mode 100644
index 0000000..b0f5268
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-exec-log4j.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>INFO,FA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>${hive.query.id}.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA</name>
+    <value>org.apache.log4j.FileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,FA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-log4j.xml
new file mode 100644
index 0000000..e92c3e8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-log4j.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>WARN,DRFA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>hive.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.encoding</name>
+    <value>UTF-8</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,DRFA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
index 22e2398..be1fe70 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
@@ -16,30 +16,173 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.11.0.1.3.2.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.11.0.1.3.3.0</version>
+      <components>
 
-    <components>        
         <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
+          <name>HIVE_METASTORE</name>
+          <category>MASTER</category>
+          <!-- may be 0 when an external metastore is specified; how to express this is still open -->
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
         <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
+          <name>MYSQL_SERVER</name>
+          <category>MASTER</category>
+          <!-- may be 0 when an external database is specified; how to express this is still open -->
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+
         <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
+          <name>HIVE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>HCATALOG</name>
+      <comment>Table and storage management service for Hadoop (HCatalog)</comment>
+      <version>0.11.0.1.3.3.0</version>
+      <components>
         <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HCAT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hive-site</config-type>
-    </configuration-dependencies>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+
+    </service>
+
+  </services>
 </metainfo>

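The move to schemaVersion 2.0 nests components under services. A quick sketch
of walking the new layout with the standard library (the file path is
illustrative):

    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")
    for service in tree.findall("./services/service"):
        svc = service.findtext("name")
        for comp in service.findall("./components/component"):
            print svc, comp.findtext("name"), comp.findtext("category")
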
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/addMysqlUser.sh
new file mode 100644
index 0000000..8d31b91
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+mysqldbhost=$4
+myhostname=$(hostname -f)
+
+service $mysqldservice start
+echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
+if [ "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -c "$mysqldbuser")" = "0" ]; then
+  echo "Adding user $mysqldbuser@$myhostname";
+  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
+  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
+fi
+mysql -u root -e "flush privileges;"
+service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hcatSmoke.sh
new file mode 100644
index 0000000..9e7b33f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveSmoke.sh
new file mode 100644
index 0000000..7e03524
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+--
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+--
+--
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..051a21e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate $0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startHiveserver2.sh
new file mode 100644
index 0000000..fa90c2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startHiveserver2.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
+echo $! > $3

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startMetastore.sh
new file mode 100644
index 0000000..9350776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
+echo $! > $3

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat.py
new file mode 100644
index 0000000..2993d3a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hcat():
+  import params
+
+  Directory(params.hcat_conf_dir,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            recursive=True
+  )
+
+  hcat_TemplateConfig('hcat-env.sh')
+
+
+def hcat_TemplateConfig(name):
+  import params
+
+  TemplateConfig(format("{hcat_conf_dir}/{name}"),
+                 owner=params.hcat_user,
+                 group=params.user_group
+  )

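TemplateConfig above renders hcat-env.sh.j2 from the package's templates
directory into {hcat_conf_dir}/hcat-env.sh with the given ownership. A rough
standalone equivalent, with paths and names purely illustrative:

    import os, pwd, grp
    from jinja2 import Environment, FileSystemLoader

    def render_hcat_env(templates_dir, conf_dir, context, owner, group):
        env = Environment(loader=FileSystemLoader(templates_dir))
        rendered = env.get_template("hcat-env.sh.j2").render(**context)
        path = os.path.join(conf_dir, "hcat-env.sh")
        with open(path, "w") as f:
            f.write(rendered)
        os.chown(path, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)
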
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_client.py
new file mode 100644
index 0000000..54a8937
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+
+class HCatClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hcat()
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..3886c33
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+
+def hcat_service_check():
+    import params
+
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File('/tmp/hcatSmoke.sh',
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir)
+
+    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True
+    )

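The check above boils down to three shell invocations. A plain-subprocess
sketch of the same prepare/test/cleanup sequence, outside the
resource_management framework (kinit and user switching are omitted, so this
is not a drop-in replacement):

    import subprocess, time

    unique = time.strftime("%Y%m%d%H%M%S")
    table = "hcatsmoke%s" % unique

    subprocess.check_call(["sh", "/tmp/hcatSmoke.sh", table, "prepare"])
    subprocess.check_call(["hadoop", "fs", "-test", "-e",
                           "/apps/hive/warehouse/" + table])
    subprocess.check_call(["sh", "/tmp/hcatSmoke.sh", table, "cleanup"])
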
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000..5f03871
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+
+def hive(name=None):
+  import params
+
+  if name == 'metastore' or name == 'hiveserver2':
+    hive_config_dir = params.hive_server_conf_dir
+    config_file_mode = 0600
+    jdbc_connector()
+  else:
+    hive_config_dir = params.hive_conf_dir
+    config_file_mode = 0644
+
+  Directory(hive_config_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=config_file_mode
+  )
+
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar_name}]"))
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=StaticFile('startHiveserver2.sh')
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+  )
+
+  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
+
+  log4j_exec_filename = 'hive-exec-log4j.properties'
+  if params.log4j_exec_props is not None:
+    PropertiesFile(log4j_exec_filename,
+                   dir=params.hive_conf_dir,
+                   properties=params.log4j_exec_props,
+                   mode=0664,
+                   owner=params.hive_user,
+                   group=params.user_group
+    )
+  elif (os.path.exists("{params.hive_conf_dir}/{log4j_exec_filename}.template")):
+    File(format("{params.hive_conf_dir}/{log4j_exec_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{params.hive_conf_dir}/{log4j_exec_filename}.template"))
+    )
+
+  log4j_filename = 'hive-log4j.properties'
+  if params.log4j_props is not None:
+    PropertiesFile(log4j_filename,
+                   dir=params.hive_conf_dir,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hive_user,
+                   group=params.user_group
+    )
+  elif (os.path.exists("{params.hive_conf_dir}/{log4j_filename}.template")):
+    File(format("{params.hive_conf_dir}/{log4j_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{params.hive_conf_dir}/{log4j_filename}.template"))
+    )
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    cmd = format(
+      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "usr/bin/"])

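For reference, hive() above derives the config dir and the hive-site.xml file
mode from the component name. The same mapping written out as a small sketch
(behavior copied from the function body, not a replacement for it):

    def hive_conf_for(name, params):
        # metastore/hiveserver2 also trigger jdbc_connector()
        if name in ("metastore", "hiveserver2"):
            return params.hive_server_conf_dir, 0600
        return params.hive_conf_dir, 0644
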
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..0a5fb2b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..c741174
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Check the Hive Metastore pid file
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000..3ad81a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='hiveserver2')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'hiveserver2',
+                  action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'hiveserver2',
+                  action = 'stop'
+    )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    # Check the HiveServer2 pid file
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_service.py
new file mode 100644
index 0000000..e8d4e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hive_service(
+    name,
+    action='start'):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format(
+      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format(
+      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
+
+  if action == 'start':
+    demon_cmd = format("{cmd}")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(demon_cmd,
+            user=params.hive_user,
+            not_if=no_op_test
+    )
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
+
+  elif action == 'stop':
+    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
+    Execute(demon_cmd)
+

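The no_op_test above is the usual pid-file liveness idiom (`ls` the pid file,
then `ps` the recorded pid). An equivalent pure-Python check, assuming the
same pid-file layout:

    import os

    def process_is_up(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 only checks existence/permissions
            return True
        except (IOError, ValueError, OSError):
            return False
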
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000..8567311
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from mysql_service import mysql_service
+
+class MysqlServer(Script):
+
+  if System.get_instance().os_family == "suse":
+    daemon_name = 'mysql'
+  else:
+    daemon_name = 'mysqld'
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action='start')
+
+    File(params.mysql_adduser_path,
+         mode=0755,
+         content=StaticFile('addMysqlUser.sh')
+    )
+
+    # Passed as a tuple so Execute shell-escapes each argument itself
+    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
+           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd), params.mysql_host[0])
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+    mysql_service(daemon_name=self.daemon_name, action='stop')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'stop')
+
+  def status(self, env):
+    mysql_service(daemon_name=self.daemon_name, action = 'status')
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_service.py
new file mode 100644
index 0000000..cfb3e08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'):
+  cmd = format('service {daemon_name} {action}')
+
+  if action == 'status':
+    logoutput = False
+  else:
+    logoutput = True
+
+  Execute(cmd,
+          path="/usr/local/bin/:/bin/:/sbin/",
+          tries=1,
+          logoutput=logoutput)
+
+
+
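
Execute here runs `service <daemon> <action>` and raises on a non-zero exit code, which is what makes the status action usable as a liveness probe. A hedged subprocess analogue (assumes a System V `service` wrapper is on the PATH):

    # subprocess analogue of the Execute resource above; illustrative only.
    import subprocess

    def mysql_service_check(daemon_name="mysqld", action="status"):
        cmd = "service {0} {1}".format(daemon_name, action)
        # Like Execute, check_call raises CalledProcessError on non-zero exit.
        subprocess.check_call(cmd, shell=True)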

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000..734d3ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_server_conf_dir = "/etc/hive/conf.server"
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+#users
+hive_user = config['configurations']['global']['hive_user']
+hive_lib = '/usr/lib/hive/lib/'
+#JDBC driver jar name
+hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
+if hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc6.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+#common
+hive_metastore_port = config['configurations']['global']['hive_metastore_port']
+hive_var_lib = '/var/lib/hive'
+hive_server_host = config['clusterHostInfo']['hive_server_host']
+hive_url = format("jdbc:hive2://{hive_server_host}:10000")
+
+smokeuser = config['configurations']['global']['smokeuser']
+smoke_test_sql = "/tmp/hiveserver2.sql"
+smoke_test_path = "/tmp/hiveserver2Smoke.sh"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+#hive_env
+hive_conf_dir = "/etc/hive/conf"
+hive_dbroot = config['configurations']['global']['hive_dbroot']
+hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#hive-site
+hive_database_name = config['configurations']['global']['hive_database_name']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh'
+
+hadoop_home = '/usr'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user = config['configurations']['global']['hdfs_user']
+user_group = config['configurations']['global']['user_group']
+artifact_dir = "/tmp/HDP-artifacts/"
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+
+start_hiveserver2_path = "/tmp/start_hiveserver2_script"
+start_metastore_path = "/tmp/start_metastore_script"
+
+hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+java64_home = config['hostLevelParams']['java_home']
+
+##### MYSQL
+
+db_name = config['configurations']['global']['hive_database_name']
+mysql_user = "mysql"
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = "/tmp/addMysqlUser.sh"
+
+########## HCAT
+
+hcat_conf_dir = '/etc/hcatalog/conf'
+
+metastore_port = 9933
+hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['global']['hcat_user']
+webhcat_user = config['configurations']['global']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['global']['hcat_log_dir']
+
+hadoop_conf_dir = '/etc/hadoop/conf'
+
+#hive-log4j.properties.template
+if ('hive-log4j' in config['configurations']):
+  log4j_props = config['configurations']['hive-log4j']
+else:
+  log4j_props = None
+
+#hive-exec-log4j.properties.template
+if ('hive-exec-log4j' in config['configurations']):
+  log4j_exec_props = config['configurations']['hive-exec-log4j']
+else:
+  log4j_exec_props = None
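
One thing worth noting in the driver handling above: if hive_jdbc_driver is neither the MySQL nor the Oracle class, jdbc_jar_name is never bound and the later format() calls that reference it would fail with a NameError. A dict-based lookup (a hypothetical variant, not the committed code) makes the fallback explicit:

    # Hypothetical, defensive variant of the driver-to-jar branching above.
    JDBC_JARS = {
        "com.mysql.jdbc.Driver": "mysql-connector-java.jar",
        "oracle.jdbc.driver.OracleDriver": "ojdbc6.jar",
    }

    def jdbc_jar_for(driver):
        jar = JDBC_JARS.get(driver)
        if jar is None:
            raise ValueError("unsupported JDBC driver: " + driver)
        return jar

    print(jdbc_jar_for("com.mysql.jdbc.Driver"))  # mysql-connector-java.jar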

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000..111e8a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+from hcat_service_check import hcat_service_check
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
+      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
+      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
+    else:
+      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
+
+    File(params.smoke_test_path,
+         content=StaticFile('hiveserver2Smoke.sh'),
+         mode=0755
+    )
+
+    File(params.smoke_test_sql,
+         content=StaticFile('hiveserver2.sql')
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True,
+            user=params.smokeuser)
+
+    hcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()
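
The secured branch simply prefixes a kinit invocation to the same smoke command. A plain-Python sketch of that composition with placeholder values (the real ones come from params):

    # Illustrative composition of the smoke-test command; placeholder values.
    security_enabled = True
    kinit_path_local = "/usr/bin/kinit"
    smoke_user_keytab = "/etc/security/keytabs/smokeuser.headless.keytab"
    smokeuser = "ambari-qa"
    java64_home = "/usr/jdk64/jdk1.6.0_31"
    smoke_test_path = "/tmp/hiveserver2Smoke.sh"
    smoke_test_sql = "/tmp/hiveserver2.sql"
    hive_url = "jdbc:hive2://hive.example.com:10000"

    kinit_cmd = ("{0} -kt {1} {2}; ".format(kinit_path_local, smoke_user_keytab, smokeuser)
                 if security_enabled else "")
    smoke_cmd = "{0}env JAVA_HOME={1} {2} {3} {4}".format(
        kinit_cmd, java64_home, smoke_test_path, hive_url, smoke_test_sql)
    print(smoke_cmd)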

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
new file mode 100644
index 0000000..7770975
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['global']['hcat_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hcat-env.sh.j2
new file mode 100644
index 0000000..2a35240
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hcat-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{hcat_user}}
+METASTORE_PORT={{metastore_port}}
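
Templates like this one are rendered on the agent with values taken from the params module. A minimal jinja2 rendering sketch of a fragment of it (requires the jinja2 package; values are illustrative):

    # Minimal jinja2 rendering of a template fragment; illustrative values.
    from jinja2 import Template

    fragment = "JAVA_HOME={{java64_home}}\nHCAT_PID_DIR={{hcat_pid_dir}}/\n"
    print(Template(fragment).render(java64_home="/usr/jdk64/jdk1.6.0_31",
                                    hcat_pid_dir="/var/run/webhcat"))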

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2
new file mode 100644
index 0000000..548262a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the jvm started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HUE/metainfo.xml
index 56654df..0a6b59e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HUE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HUE/metainfo.xml
@@ -19,12 +19,13 @@
     <user>root</user>
     <comment>Hue is a graphical user interface to operate and develop
       applications for Apache Hadoop.</comment>
-    <version>2.2.0.1.3.2.0</version>
+    <version>2.2.0.1.3.3.0</version>
 
     <components>
         <component>
             <name>HUE_SERVER</name>
             <category>MASTER</category>
+            <cardinality>1</cardinality>
         </component>
     </components>
 


[25/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2
new file mode 100644
index 0000000..b9f0892
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2
@@ -0,0 +1,643 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+{# TODO: Look for { or } in created file #}
+# NAGIOS SERVER Check (status log update)
+{% if hostgroup_defs['nagios-server'] %}
+define service {
+        name                            hadoop-service
+        use                             generic-service
+        notification_options            w,u,c,r,f,s
+        first_notification_delay        0
+        notification_interval           0                 # Send the notification once
+        contact_groups                  admins
+        notifications_enabled           1
+        event_handler_enabled           1
+        register                        0
+}
+
+define service {
+        hostgroup_name          nagios-server        
+        use                     hadoop-service
+        service_description     NAGIOS::Nagios status log freshness
+        servicegroups           NAGIOS
+        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
+        normal_check_interval   5
+        retry_check_interval    0.5
+        max_check_attempts      2
+}
+
+# NAGIOS SERVER HDFS Checks
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes with space available
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{# used only for HDP2 #}
+{% if hostgroup_defs['namenode'] and dfs_ha_enabled %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::NameNode HA Healthy
+        servicegroups           HDFS
+        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      5
+}
+{% endif %}
+
+# AMBARI AGENT Checks
+{% for hostname in all_hosts %}
+define service {
+        host_name	        {{ hostname }}
+        use                     hadoop-service
+        service_description     AMBARI::Ambari Agent process
+        servicegroups           AMBARI
+        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% endfor %}
+
+# NAGIOS SERVER ZOOKEEPER Checks
+{% if hostgroup_defs['zookeeper-servers'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
+        servicegroups           ZOOKEEPER
+        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+# NAGIOS SERVER HBASE Checks
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HBASE::Percent RegionServers live
+        servicegroups           HBASE
+        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+
+
+# GANGLIA SERVER Checks
+{% if hostgroup_defs['ganglia-server'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Server process
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for NameNode
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% if hostgroup_defs['jobtracker'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for JobTracker
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HBase Master
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['resourcemanager'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% endif %}
+
+{% if hostgroup_defs['snamenode'] %}
+# Secondary namenode checks
+define service {
+        hostgroup_name          snamenode
+        use                     hadoop-service
+        service_description     NAMENODE::Secondary NameNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['namenode'] %}
+# HDFS Checks
+{%  for namenode_hostname in namenode_host %}
+{# TODO: check if we can get rid of str, lower #}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if env.system.os_family != "suse" %}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_webui!namenode!{{ namenode_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      5
+}
+
+{%  endfor  %}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Blocks health
+        servicegroups           HDFS
+        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::HDFS capacity utilization
+        servicegroups           HDFS
+        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   10
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+{% endif %}
+
+# MAPREDUCE Checks
+{# On HDP1, the jobtracker and tasktracker alerts go here #}
+
+{% if hostgroup_defs['resourcemanager'] %}
+# YARN::RESOURCEMANAGER Checks 
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager Web UI
+        servicegroups           YARN
+        check_command           check_webui!resourcemanager!{{ rm_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.os_family != "suse" %}
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
+        servicegroups           YARN
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager RPC latency
+        servicegroups           YARN
+        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['nodemanagers'] %}
+# YARN::NODEMANAGER Checks
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager health
+        servicegroups           YARN
+        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     NODEMANAGER::Percent NodeManagers live
+        servicegroups           YARN
+        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+# MAPREDUCE::JOBHISTORY Checks
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!historyserver2!{{ hs_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.os_family != "suse" %}
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer CPU utilization
+        servicegroups           MAPREDUCE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer RPC latency
+        servicegroups           MAPREDUCE
+        check_command           check_rpcq_latency!JobHistoryServer!{{ hs_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{%  endif %}
+
+{% if hostgroup_defs['journalnodes'] %}
+# Journalnode checks
+define service {
+        hostgroup_name          journalnodes
+        use                     hadoop-service
+        service_description     JOURNALNODE::JournalNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if dfs_ha_enabled %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent JournalNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+{% if hostgroup_defs['slaves'] %}
+# HDFS::DATANODE Checks
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode space
+        servicegroups           HDFS
+        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      2
+}
+
+{% endif %}
+
+{% if hostgroup_defs['flume-servers'] %}
+# FLUME Checks
+define service {
+        hostgroup_name          flume-servers
+        use                     hadoop-service
+        service_description     FLUME::Flume Agent process
+        servicegroups           FLUME
+        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['zookeeper-servers'] %}
+# ZOOKEEPER Checks
+define service {
+        hostgroup_name          zookeeper-servers
+        use                     hadoop-service
+        service_description     ZOOKEEPER::ZooKeeper Server process
+        servicegroups           ZOOKEEPER
+        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+# HBASE::REGIONSERVER Checks
+define service {
+        hostgroup_name          region-servers
+        use                     hadoop-service
+        service_description     REGIONSERVER::RegionServer process
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{# HBASE:: MASTER Checks
+# define service {
+#         hostgroup_name          hbasemasters
+#         use                     hadoop-service
+#         service_description     HBASEMASTER::HBase Master Web UI
+#         servicegroups           HBASE
+#         check_command           check_webui!hbase!{{ hbase_master_port }}
+#         normal_check_interval   1
+#         retry_check_interval    1
+#         max_check_attempts      3
+# #}
+{%  for hbasemaster in hbase_master_hosts  %}
+{% if env.system.os_family != "suse" %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endfor %}
+{% endif %}
+
+{% if hostgroup_defs['hiveserver'] %}
+# HIVE Metastore check
+define service {
+        hostgroup_name          hiveserver
+        use                     hadoop-service
+        service_description     HIVE-METASTORE::Hive Metastore status
+        servicegroups           HIVE-METASTORE
+        {% if security_enabled %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+# Oozie check
+define service {
+        hostgroup_name          oozie-server
+        use                     hadoop-service
+        service_description     OOZIE::Oozie Server status
+        servicegroups           OOZIE
+        {% if security_enabled %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+# WEBHCAT check
+define service {
+        hostgroup_name          webhcat-server
+        use                     hadoop-service
+        service_description     WEBHCAT::WebHCat Server status
+        servicegroups           WEBHCAT 
+        {% if security_enabled %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hue-server'] %}
+define service {
+        hostgroup_name          hue-server
+        use                     hadoop-service
+        service_description     HUE::Hue Server status
+        servicegroups           HUE
+        check_command           check_hue_status
+        normal_check_interval   100
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
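
The AMBARI AGENT block above pairs each hostname with its ping port through loop.index, relying on all_hosts and all_ping_ports being parallel lists. A small jinja2 sketch of that pairing (illustrative hosts and ports):

    # Rendering sketch for the all_hosts / all_ping_ports pairing; illustrative.
    from jinja2 import Template

    fragment = ("{% for hostname in all_hosts %}"
                "check_tcp!{{ all_ping_ports[loop.index-1] }} on {{ hostname }}\n"
                "{% endfor %}")
    print(Template(fragment).render(all_hosts=["host1.example.com", "host2.example.com"],
                                    all_ping_ports=[8670, 8670]))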

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2
new file mode 100644
index 0000000..acb2522
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.cfg.j2
@@ -0,0 +1,1349 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
+#
+# Read the documentation for more information on this configuration
+# file.  I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+# Last Modified: 12-14-2008
+#
+##############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes.  This should be the first option specified 
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/contacts.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# Definitions for monitoring the local (Linux) host
+#cfg_file=/etc/nagios/objects/localhost.cfg
+
+# Definitions for monitoring a Windows machine
+#cfg_file=/etc/nagios/objects/windows.cfg
+
+# Definitions for monitoring a router/switch
+#cfg_file=/etc/nagios/objects/switch.cfg
+
+# Definitions for monitoring a network printer
+#cfg_file=/etc/nagios/objects/printer.cfg
+
+# Definitions for hadoop servers
+cfg_file={{nagios_host_cfg}}
+cfg_file={{nagios_hostgroup_cfg}}
+cfg_file={{nagios_servicegroup_cfg}}
+cfg_file={{nagios_service_cfg}}
+cfg_file={{nagios_command_cfg}}
+
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts.  The CGIs read object definitions from 
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file.  You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start 
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions.  The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file={{nagios_resource_cfg}}
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored.  Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and 
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.  
+# You can either supply a username or a UID.
+
+nagios_user={{nagios_user}}
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.  
+# You can either supply a group name or a GID.
+
+nagios_group={{nagios_group}}
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).  By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side.  If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later.  If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute.  If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly 
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody').  Permissions should be set at the 
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/nagios/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming 
+# external commands before they are processed.  As external commands 
+# are processed by the daemon, they are removed from the buffer.  
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file={{nagios_pid_file}}
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc.  This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is the path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values:  0      = Broker nothing
+#         -1      = Broker everything
+#         <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# be loaded by Nagios at startup.  Use multiple directives if you want
+# to load more than one module.  Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+#    1. Shutdown Nagios, replace the module file, restart Nagios
+#    2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+#   broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+#	n	= None - don't rotate the log
+#	h	= Hourly rotation (top of the hour)
+#	d	= Daily rotation (midnight every day)
+#	w	= Weekly rotation (midnight on Saturday evening)
+#	m	= Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be 
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1.  If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0.  If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0.  If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0.  If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1.  If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option.  In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0.  If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0.  If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!  This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed.  Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts.  Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks.  Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+#       s       = Use "smart" interleave factor calculation
+#       x       = Use an interleave factor of x, where x is a
+#                 number greater than or equal to 1.
+
+service_interleave_factor=s
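+# Illustration (hypothetical numbers, assumed scheduling behavior): with 12
+# services and a fixed interleave factor of 4, checks are dispatched in the
+# order 1,5,9, 2,6,10, 3,7,11, 4,8,12 rather than 1..12 sequentially, so
+# consecutive checks are less likely to hit the same remote host.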
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed.  Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of 
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized.  A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before 
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!  
+
+check_result_path=/var/nagios/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid.  Files older than this 
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance with regard to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks.  Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
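+# Rough sketch of the assumed decision logic: when an on-demand host state
+# is needed, if (now - last_check_time) <= cached_host_check_horizon the
+# cached state is reused; otherwise a fresh check is executed.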
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance with regard to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state 
+# information when checking host and service dependencies. Normally 
+# Nagios will only use the latest hard host or service state when 
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it is a soft or hard state type), enable this option.
+# Values:
+#  0 = Don't use soft state dependencies (default) 
+#  1 = Use soft state dependencies 
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time.  This can help balance the load on
+# the monitoring server.  
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks.  This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled.  Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off.  Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands.  All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down.  Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor.  This is useful for 
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts.  Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down.  The state 
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular interval, but it will still save retention
+# data before shutting down or restarting.  If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set 
+# program status variables based on the values saved in the
+# retention file.  If you want to use retained program status
+# information, set this value to 1.  If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file.  If you want
+# to use retained scheduling info, set this
+# value to 1.  If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ORs (sums) of values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+# (a further worked example follows the mask settings below).
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options.  For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options.  For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
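+# Worked example (MODATTR_ values assumed from include/common.h, shown for
+# illustration only): to also exclude the notifications-enabled flag, add
+# MODATTR_NOTIFICATIONS_ENABLED (1) + MODATTR_EVENT_HANDLER_ENABLED (8) +
+# MODATTR_FLAP_DETECTION_ENABLED (16) = 25:
+#retained_host_attribute_mask=25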
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files.  Setting this to 60 means
+# that each interval is one minute long (60 seconds).  Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
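+# Example: with interval_length=60, a host or service check_interval of 5
+# schedules that check every 5 * 60 = 300 seconds.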
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available.  It is recommended that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios.  Nagios is critical to you - make sure you keep it in
+# good shape.  Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance 
+# with our privacy policy - see http://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates.  By default, Nagios will send information on the 
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not.  Nagios Enterprises uses
+# this data to determine the number of users running specific versions of
+# Nagios.  Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default).  Otherwise set this value to 1 to
+# enable the aggressive check option.  Read the docs for more info
+# on what aggressive host check is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will sent out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started.  Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks.  If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below).  Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed.  These commands are executed only if the
+# enable_performance_data option (above) is set to 1.  The command
+# argument is the short name of a command definition that you 
+# define in your host configuration file.  Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# enable_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files.  The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text.  A newline is automatically added after each write
+# to the performance data file.  Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below.  A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files.  The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_services option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_hosts option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios.  This option is useful
+# if you have a distributed or failover monitoring setup.  In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts.  If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance.  Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT.  By default, a passive host check
+# result will put a host into a HARD state type.  This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically 
+# check for orphaned host and service checks.  Since service checks are
+# not rescheduled until the results of their previous execution 
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled.  A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks.  Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results.  If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results.  If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".  
+# Flapping occurs when a host or service changes between
+# states too frequently.  When Nagios detects that a 
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping.  Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+#         0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does.  This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
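+# Illustration of the assumed hysteresis: a service is marked as flapping
+# once its weighted percent state change rises above
+# high_service_flap_threshold (20.0 here), and is only marked as no longer
+# flapping after that value falls back below low_service_flap_threshold
+# (5.0 here); notifications are suppressed in between.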
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+#	us		(MM-DD-YYYY HH:MM:SS)
+#	euro    	(DD-MM-YYYY HH:MM:SS)
+#	iso8601		(YYYY-MM-DD HH:MM:SS)
+#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in.  If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path 
+# to include your timezone.  Example:
+#
+#   <Directory "/usr/local/nagios/sbin/">
+#      SetEnv TZ "Australia/Brisbane"
+#      ...
+#   </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located.  If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file = {{nagios_p1_pl}}
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime.  This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more 
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc.  This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+#	$HOSTOUTPUT$
+#	$HOSTPERFDATA$
+#	$HOSTACKAUTHOR$
+#	$HOSTACKCOMMENT$
+#	$SERVICEOUTPUT$
+#	$SERVICEPERFDATA$
+#	$SERVICEACKAUTHOR$
+#	$SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files.  Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression 
+# matching takes place in the object config files.  This option
+# only has an effect if regular expression matching is enabled
+# (see above).  If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?).  If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon.  Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes.  Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+#         0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+#         0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.  Enabling this option can cause performance issues in 
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+#         0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks).  If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+#        0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks).  Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems.  Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this.  If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+#        0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file.  OR values together to log multiple
+# types of information.
+# Values: 
+#          -1 = Everything
+#          0 = Nothing
+#	   1 = Functions
+#          2 = Configuration
+#          4 = Process information
+#	   8 = Scheduled events
+#          16 = Host/service checks
+#          32 = Notifications
+#          64 = Event broker
+#          128 = External commands
+#          256 = Commands
+#          512 = Scheduled downtime
+#          1024 = Comments
+#          2048 = Macros
+
+debug_level=0
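+# Example: to log host/service checks and notifications only, OR the values
+# together: 16 + 32 = 48
+#debug_level=48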
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+#         1 = More detailed
+#         2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file.  If
+# the file grows larger than this size, it will be renamed with a .old
+# extension.  If a file already exists with a .old extension it will
+# automatically be deleted.  This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2
new file mode 100644
index 0000000..d8936a0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.conf.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
+# Last Modified: 11-26-2005
+#
+# This file contains examples of entries that need
+# to be incorporated into your Apache web server
+# configuration file.  Customize the paths, etc. as
+# needed to fit your system.
+#
+
+ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
+
+<Directory "/usr/lib/nagios/cgi">
+#  SSLRequireSSL
+   Options ExecCGI
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+
+Alias /nagios "/usr/share/nagios"
+
+<Directory "/usr/share/nagios">
+#  SSLRequireSSL
+   Options None
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2
new file mode 100644
index 0000000..01e21ac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/nagios.j2
@@ -0,0 +1,146 @@
+#!/bin/sh
+# $Id$
+# Nagios	Startup script for the Nagios monitoring daemon
+#
+# chkconfig:	- 85 15
+# description:	Nagios is a service monitoring system
+# processname: nagios
+# config: /etc/nagios/nagios.cfg
+# pidfile: /var/nagios/nagios.pid
+#
+### BEGIN INIT INFO
+# Provides:		nagios
+# Required-Start:	$local_fs $syslog $network
+# Required-Stop:	$local_fs $syslog $network
+# Short-Description:    start and stop Nagios monitoring server
+# Description:		Nagios is a service monitoring system
+### END INIT INFO
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+prefix="/usr"
+exec_prefix="/usr"
+exec="/usr/sbin/nagios"
+prog="nagios"
+config="/etc/nagios/nagios.cfg"
+pidfile="{{nagios_pid_file}}"
+user="{{nagios_user}}"
+
+[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+
+lockfile=/var/lock/subsys/$prog
+
+start() {
+    [ -x $exec ] || exit 5
+    [ -f $config ] || exit 6
+    echo -n $"Starting $prog: "
+    daemon --user=$user $exec -d $config
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && touch $lockfile
+    return $retval
+}
+
+stop() {
+    echo -n $"Stopping $prog: "
+    killproc -d 10 $exec
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && rm -f $lockfile
+    return $retval
+}
+
+
+restart() {
+    stop
+    start
+}
+
+reload() {
+    echo -n $"Reloading $prog: "
+    killproc $exec -HUP
+    RETVAL=$?
+    echo
+}
+
+force_reload() {
+    restart
+}
+
+check_config() {
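+        # NOTE (assumption): $nice and $corelimit are not defined in this
+        # script; they are expected to come from /etc/sysconfig/nagios
+        # (sourced above) and expand to empty strings otherwise.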
+        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
+        RETVAL=$?
+        if [ $RETVAL -ne 0 ] ; then
+                echo -n $"Configuration validation failed"
+                failure
+                echo
+                exit 1
+
+        fi
+}
+
+
+case "$1" in
+    start)
+        status $prog && exit 0
+	check_config
+        $1
+        ;;
+    stop)
+        status $prog|| exit 0
+        $1
+        ;;
+    restart)
+	check_config
+        $1
+        ;;
+    reload)
+        status $prog || exit 7
+	check_config
+        $1
+        ;;
+    force-reload)
+	check_config
+        force_reload
+        ;;
+    status)
+        status $prog
+        ;;
+    condrestart|try-restart)
+        status $prog|| exit 0
+	check_config
+        restart
+        ;;
+    configtest)
+        echo -n  $"Checking config for $prog: "
+        check_config && success
+        echo
+	;;
+    *)
+        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
+        exit 2
+esac
+exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2
new file mode 100644
index 0000000..23c7a56
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/resource.cfg.j2
@@ -0,0 +1,33 @@
+###########################################################################
+#
+# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
+#
+# Last Modified: 09-10-2003
+#
+# You can define $USERx$ macros in this file, which can in turn be used
+# in command definitions in your host config file(s).  $USERx$ macros are
+# useful for storing sensitive information such as usernames, passwords,
+# etc.  They are also handy for specifying the path to plugins and
+# event handlers - if you decide to move the plugins or event handlers to
+# a different directory in the future, you can just update one or two
+# $USERx$ macros, instead of modifying a lot of command definitions.
+#
+# The CGIs will not attempt to read the contents of resource files, so
+# you can set restrictive permissions (600 or 660) on them.
+#
+# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
+#
+# Resource files may also be used to store configuration directives for
+# external data sources like MySQL...
+#
+###########################################################################
+
+# Sets $USER1$ to be the path to the plugins
+$USER1$={{plugins_dir}}
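+# Illustrative use in a command definition (example names, not part of this
+# file):
+#   define command {
+#     command_name  check_ping
+#     command_line  $USER1$/check_ping -H $HOSTADDRESS$ -w $ARG1$ -c $ARG2$
+#   }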
+
+# Sets $USER2$ to be the path to event handlers
+#$USER2$={{eventhandlers_dir}}
+
+# Store some usernames and passwords (hidden from the CGIs)
+#$USER3$=someuser
+#$USER4$=somepassword
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml
new file mode 100644
index 0000000..169085c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>log4j.appender.oozie</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.oozie.DatePattern</name>
+    <value>.yyyy-MM-dd-HH</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.File</name>
+    <value>${oozie.log.dir}/oozie.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.oozieops</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.File</name>
+    <value>${oozie.log.dir}/oozie-ops.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.File</name>
+    <value>${oozie.log.dir}/oozie-instrumentation.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.File</name>
+    <value>${oozie.log.dir}/oozie-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.File</name>
+    <value>${oozie.log.dir}/oozie-jpa.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.logger.openjpa</name>
+    <value>INFO, openjpa</value>
+  </property>
+  <property>
+    <name>log4j.logger.oozieops</name>
+    <value>INFO, oozieops</value>
+  </property>
+  <property>
+    <name>log4j.logger.oozieinstrumentation</name>
+    <value>ALL, oozieinstrumentation</value>
+  </property>
+  <property>
+    <name>log4j.logger.oozieaudit</name>
+    <value>ALL, oozieaudit</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.oozie</name>
+    <value>INFO, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop</name>
+    <value>WARN, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.mortbay</name>
+    <value>WARN, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.hsqldb</name>
+    <value>WARN, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.security.authentication.server</name>
+    <value>INFO, oozie</value>
+  </property>
+
+</configuration>


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
index 515e669..9a2f0b9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
@@ -16,23 +16,70 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.</comment>
-    <version>4.0.0.2.0.6.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>4.0.0.2.1.1</version>
+      <components>
         <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
+          <name>OOZIE_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>OOZIE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>oozie-site</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>oozie.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>oozie-client.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>extjs-2.2-1</name>
+            </package>
+            <!--TODO: uncomment this after the package becomes available in the repo-->
+            <!--<package>-->
+              <!--<type>rpm</type>-->
+              <!--<name>falcon-0.4.0.2.0.6.0-76</name>-->
+            <!--</package>-->
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie-site</config-type>
+        <config-type>oozie-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
new file mode 100644
index 0000000..2cb5a7a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
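+# Usage sketch (hypothetical arguments):
+#   getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS
+# prints the <value> paired with the matching <name> in a Hadoop-style XML file.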
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  su - ${smoke_test_user} -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the couner and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
+export oozie_conf_dir=$1
+export hadoop_conf_dir=$2
+export smoke_test_user=$3
+export security_enabled=$4
+export smoke_user_keytab=$5
+export kinit_path_local=$6
+
+export OOZIE_EXIT_CODE=0
+export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
+export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
+export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
+cd $OOZIE_EXAMPLES_DIR
+
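+# Point the bundled example job.properties at this cluster's NameNode and
+# ResourceManager; the stock file references localhost ports from various
+# Hadoop versions.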
+tar -zxf oozie-examples.tar.gz
+sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else 
+  kinitcmd=""
+fi
+
+su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
+su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
+su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
+echo $cmd
+job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id"
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..97a513c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi  
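
Note: wrap_ooziedb.sh exists so that repeated `ooziedb.sh create` calls are
idempotent: a non-zero exit caused only by "java.lang.Exception: DB schema exists"
is rewritten to success, while any other failure propagates. A rough Python
equivalent of that convention (illustrative only):

    import subprocess

    def run_ooziedb(args):
        # Treat "DB schema exists" as success so re-runs are harmless.
        p = subprocess.Popen(["/usr/lib/oozie/bin/ooziedb.sh"] + list(args),
                             cwd="/var/tmp/oozie",
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = p.communicate()
        print out
        if p.returncode != 0 and "java.lang.Exception: DB schema exists" in out:
            return 0
        return p.returncode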

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
new file mode 100644
index 0000000..99b42ea
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+
+def oozie(is_server=False):  # TODO: see if we can remove this parameter
+  import params
+  #TODO hack for falcon el
+  oozie_site = dict(params.config['configurations']['oozie-site'])
+  oozie_site["oozie.services.ext"] = 'org.apache.oozie.service.JMSAccessorService,' + oozie_site["oozie.services.ext"]
+  XmlConfig( "oozie-site.xml",
+    conf_dir = params.conf_dir, 
+    configurations = oozie_site,
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0664
+  )
+  Directory( params.conf_dir,
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
+    owner = params.oozie_user
+  )
+
+  if (params.log4j_props != None):
+    PropertiesFile('oozie-log4j.properties',
+      dir=params.conf_dir,
+      properties=params.log4j_props,
+      mode=0664,
+      owner=params.oozie_user,
+      group=params.user_group,
+    )
+  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user
+    )
+
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
+    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
+     -o {check_db_connection_jar_name}'"),
+      not_if  = format("[ -f {check_db_connection_jar} ]")
+    )
+    
+  oozie_ownership( )
+  
+  if is_server:      
+    oozie_server_specific( )
+  
+def oozie_ownership():
+  import params
+  
+  File ( format("{conf_dir}/adminusers.txt"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+def oozie_server_specific():
+  import params
+  
+  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    mode = 0755,
+    recursive = True
+  )
+       
+  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
+  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
+  
+  # this is different for HDP1
+  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir} && mkdir -p {oozie_libext_dir} && cp {ext_js_path} {oozie_libext_dir}")
+  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
+    cmd3 += format(" && cp {jdbc_driver_jar} {oozie_libext_dir}")
+  #falcon el extension
+  if params.has_falcon_host:
+    cmd3 += format(' && cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-0.4.0.2.0.6.0-76.jar {oozie_libext_dir}')
+  # this is different for HDP1
+  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh prepare-war")
+  
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  Execute( [cmd1, cmd2, cmd3],
+    not_if  = no_op_test
+  )
+  Execute( cmd4,
+    user = params.oozie_user,
+    not_if  = no_op_test
+  )
+  
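
Note: the falcon workaround at the top of oozie() copies oozie-site into a plain
dict and prepends org.apache.oozie.service.JMSAccessorService to oozie.services.ext
before XmlConfig writes oozie-site.xml; it assumes oozie.services.ext is already
present in the cluster config. The mutation in isolation (sample values are
placeholders):

    # Stand-in for params.config['configurations']['oozie-site']
    source_site = {
        "oozie.services.ext": "org.apache.oozie.service.HCatAccessorService",
    }

    oozie_site = dict(source_site)  # copy, so the source config stays untouched
    oozie_site["oozie.services.ext"] = (
        "org.apache.oozie.service.JMSAccessorService," +
        oozie_site["oozie.services.ext"])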

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py
new file mode 100644
index 0000000..1d5db39
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py
@@ -0,0 +1,33 @@
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py
new file mode 100644
index 0000000..6c00738
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py
@@ -0,0 +1,47 @@
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=True)
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    #TODO remove this when config command will be implemented
+    self.configure(env)
+    oozie_service(action='start')
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieServer().execute()
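
Note: the commented-out main() above documents how these Script subclasses are
driven: Script.execute() reads the command name, the command/config JSON file, and
the script base directory from sys.argv. That makes a component script runnable by
hand for debugging, along these lines (the paths below are placeholders, not the
agent's real layout):

    import sys
    from oozie_server import OozieServer

    # argv[1] = command, argv[2] = command/config JSON, argv[3] = base dir
    sys.argv = ["", "start", "/tmp/input.json", "/tmp/oozie_package"]
    OozieServer().execute()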

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
new file mode 100644
index 0000000..e9edcc9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def oozie_service(action = 'start'): # 'start' or 'stop'
+  import params
+
+  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
+    
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+      
+    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
+    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+      
+    if db_connection_check_command:
+      Execute( db_connection_check_command)
+                  
+    Execute( cmd1,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+      ignore_failures = True
+    ) 
+    
+    Execute( cmd2,
+      user = params.oozie_user,       
+      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
+    )
+    
+    Execute( start_cmd,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+    )
+  elif action == 'stop':
+    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
+    Execute( stop_cmd,
+      only_if  = no_op_test
+    )
+
+  
+  
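
Note: idempotence in oozie_service() rests on no_op_test: the shell test passes
only when pid_file names a live process, so the one-time DB/share setup and
start_cmd are skipped (not_if) while Oozie is up, and stop_cmd runs only_if it is.
The same liveness check expressed directly in Python (illustrative; Execute
evaluates the shell form itself):

    import os

    def oozie_is_running(pid_file):
        # Mirrors: ls {pid_file} >/dev/null && ps `cat {pid_file}` >/dev/null
        try:
            pid = int(open(pid_file).read().strip())
            os.kill(pid, 0)  # signal 0 checks existence without killing
            return True
        except (IOError, ValueError, OSError):
            return False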

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
new file mode 100644
index 0000000..9e45f9d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+oozie_user = config['configurations']['global']['oozie_user']
+smokeuser = config['configurations']['global']['smokeuser']
+conf_dir = "/etc/oozie/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+user_group = config['configurations']['global']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+hadoop_prefix = "/usr"
+oozie_tmp_dir = "/var/tmp/oozie"
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
+ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
+oozie_libext_dir = "/usr/lib/oozie/libext"
+lzo_enabled = config['configurations']['global']['lzo_enabled']
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
+oozie_keytab = config['configurations']['global']['oozie_keytab']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+java_share_dir = "/usr/share/java"
+
+java_home = config['hostLevelParams']['java_home']
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['global']['oozie_log_dir']
+oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_lib_dir = "/var/lib/oozie/"
+oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+
+if jdbc_driver_name == "com.mysql.jdbc.Driver":
+  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+else:
+  jdbc_driver_jar = ""
+
+hostname = config["hostname"]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = not len(falcon_host)  == 0
+falcon_home = '/usr/lib/falcon'
+
+#oozie-log4j.properties
+if ('oozie-log4j' in config['configurations']):
+  log4j_props = config['configurations']['oozie-log4j']
+else:
+  log4j_props = None
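
Note: params.py deliberately mixes two lookup styles: plain dict indexing for
values that must exist (a missing key fails the command immediately) and
default('/path', fallback) for optional ones such as the JDBC driver, password,
and falcon hosts. A stand-in for that helper, assuming only the
slash-path-over-nested-dicts behavior the file relies on (the library version
reads the command config implicitly; this sketch takes it as an argument):

    def default(config, path, fallback):
        # Walk a '/segmented/path' through nested dicts; fall back when absent.
        node = config
        for part in path.strip("/").split("/"):
            if not isinstance(node, dict) or part not in node:
                return fallback
            node = node[part]
        return node

    cfg = {"configurations": {"oozie-site": {}}}
    driver = default(cfg, "/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
    assert driver == ""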

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py
new file mode 100644
index 0000000..7c1c1f2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class OozieServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    # on HDP1 this file is different
+    smoke_test_file_name = 'oozieSmoke2.sh'
+
+    oozie_smoke_shell_file( smoke_test_file_name)
+  
+def oozie_smoke_shell_file(file_name):
+  import params
+
+  File( format("/tmp/{file_name}"),
+    content = StaticFile(file_name),
+    mode = 0755
+  )
+  
+  if params.security_enabled:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
+  else:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
+
+  Execute( format("/tmp/{file_name}"),
+    command   = sh_cmd,
+    tries     = 3,
+    try_sleep = 5,
+    logoutput = True
+  )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServiceCheck().execute()
+  
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
+  #main()
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
new file mode 100644
index 0000000..c44fcf4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2
new file mode 100644
index 0000000..270a1a8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-env.sh.j2
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#      http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+# export OOZIE_HTTP_PORT=11000
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
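
Note: TemplateConfig renders .j2 templates like this one with Jinja2, filling
{{java_home}}, {{oozie_log_dir}}, {{pid_file}} and {{oozie_data_dir}} from params
before writing the file. The substitution itself, outside the resource framework
(sample values only):

    from jinja2 import Template

    snippet = ("export JAVA_HOME={{java_home}}\n"
               "export OOZIE_LOG={{oozie_log_dir}}\n")
    print Template(snippet).render(java_home="/usr/jdk64/jdk1.6.0_31",
                                   oozie_log_dir="/var/log/oozie")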

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2
new file mode 100644
index 0000000..e4a2662
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml
new file mode 100644
index 0000000..a0db719
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>log4j.logger.org.apache.pig</name>
+    <value>info, A</value>
+  </property>
+  <property>
+    <name>log4j.appender.A</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.A.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.A.layout.ConversionPattern</name>
+    <value>%-4r [%t] %-5p %c %x - %m%n</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
index d85a456..4cb6a5e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
@@ -16,15 +16,46 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.12.0.2.0.6.1</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.12.0.2.1.1</version>
+      <components>
         <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
+          <name>PIG</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>pig-log4j</config-type>
+      </configuration-dependencies>
 
+    </service>
+  </services>
 </metainfo>
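
Note: the rewritten metainfo follows the schemaVersion 2.0 shape used throughout
this patch: services/service carries name, comment, version, per-component
commandScript entries, osSpecifics package lists, and a service-level service
check script. A quick structural sanity check with the standard library:

    import xml.etree.ElementTree as ET

    root = ET.parse("metainfo.xml").getroot()
    for service in root.findall("./services/service"):
        print service.findtext("name"), service.findtext("version")
        for comp in service.findall("./components/component"):
            print " ", comp.findtext("name"), "->", comp.findtext("commandScript/script")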

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh
new file mode 100644
index 0000000..a22456e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
new file mode 100644
index 0000000..781e635
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+
+pig_conf_dir = "/etc/pig/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hdfs_user = config['configurations']['global']['hdfs_user']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_home = "/usr"
+
+#log4j.properties
+if ('pig-log4j' in config['configurations']):
+  log4j_props = config['configurations']['pig-log4j']
+else:
+  log4j_props = None
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
new file mode 100644
index 0000000..d8aadf7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management import *
+
+def pig():
+  import params
+
+  Directory( params.pig_conf_dir,
+    owner = params.hdfs_user,
+    group = params.user_group
+  )
+
+  pig_TemplateConfig( ['pig-env.sh','pig.properties'])
+
+  if (params.log4j_props != None):
+    PropertiesFile('log4j.properties',
+      dir=params.pig_conf_dir,
+      properties=params.log4j_props,
+      mode=0664,
+      owner=params.hdfs_user,
+      group=params.user_group,
+    )
+  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
+    File(format("{params.pig_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hdfs_user
+    )
+
+def pig_TemplateConfig(name):
+  import params
+  
+  if not isinstance(name, list):
+    name = [name]
+    
+  for x in name:
+    TemplateConfig( format("{pig_conf_dir}/{x}"),
+        owner = params.hdfs_user
+    )
+  
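
Note: pig_TemplateConfig accepts either a single template name or a list and
normalizes to a list before iterating, so pig() can pass
['pig-env.sh', 'pig.properties'] while other callers pass a bare string. The
idiom in isolation:

    def as_list(name):
        # Accept a single string or a list of strings uniformly.
        return name if isinstance(name, list) else [name]

    assert as_list("pig-env.sh") == ["pig-env.sh"]
    assert as_list(["pig-env.sh", "pig.properties"]) == ["pig-env.sh", "pig.properties"]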

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py
new file mode 100644
index 0000000..acd0cb1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from pig import pig
+
+         
+class PigClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    pig()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
new file mode 100644
index 0000000..3cca087
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class PigServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    input_file = 'passwd'
+    output_file = "pigsmoke.out"
+  
+    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
+    # cleanup is put below to handle retries; when retrying there will be a stale file that needs cleanup; the exit code is that of the second command
+    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+    test_cmd = format("fs -test -e {output_file}")
+  
+    ExecuteHadoop( create_file_cmd,
+      tries     = 3,
+      try_sleep = 5,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+  
+    File( '/tmp/pigSmoke.sh',
+      content = StaticFile("pigSmoke.sh"),
+      mode = 0755
+    )
+  
+    Execute( "pig /tmp/pigSmoke.sh",
+      tries     = 3,
+      try_sleep = 5,
+      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      user      = params.smokeuser,
+      logoutput = True
+    )
+  
+    ExecuteHadoop( test_cmd,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigServiceCheck().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigServiceCheck().execute()
+  
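
Note: the cleanup trick in create_file_cmd depends on shell semantics: in "a; b"
the exit status is b's alone, so the `dfs -rmr` of files that may not exist on a
first run can fail harmlessly while a failed `-put` still fails the check.
Demonstrated directly:

    import subprocess

    # Exit status of "a; b" is b's status; the failing first command is ignored.
    print subprocess.call("false; true", shell=True)   # 0
    print subprocess.call("true; false", shell=True)   # 1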

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig-env.sh.j2
new file mode 100644
index 0000000..ad10c21
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig-env.sh.j2
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+if [ -d "/usr/lib/tez" ]; then
+  PIG_OPTS="-Dmapreduce.framework.name=yarn"
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig.properties.j2
new file mode 100644
index 0000000..6fcb233
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/templates/pig.properties.j2
@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Pig configuration file. All values can be overwritten by command line arguments.
+
+# log4jconf log4j configuration file
+# log4jconf=./conf/log4j.properties
+
+# a file that contains pig script
+#file=
+
+# load jarfile, colon separated
+#jar=
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=local
+
+#pig.logfile=
+
+#Do not spill temp files smaller than this size (bytes)
+#pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+#pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+#pig.exec.reducers.bytes.per.reducer=1000000000
+#pig.exec.reducers.max=999
+
+#Use this option only when your Pig job will otherwise die because of
+#using more counter than hadoop configured limit
+#pig.disable.counter=true
+hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
index 4220c3c..ea115c7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
@@ -16,15 +16,45 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.4.2.0.6.1</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.4.2.1.1</version>
 
-    <components>
+      <components>
         <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
+          <name>SQOOP</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>sqoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
new file mode 100644
index 0000000..7de3367
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+security_enabled = config['configurations']['global']['security_enabled']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+sqoop_conf_dir = "/usr/lib/sqoop/conf"
+hbase_home = "/usr"
+hive_home = "/usr"
+zoo_conf_dir = "/etc/zookeeper"
+sqoop_lib = "/usr/lib/sqoop/lib"
+sqoop_user = "sqoop"
+
+keytab_path = config['configurations']['global']['keytab_path']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py
new file mode 100644
index 0000000..c42501a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management import *
+
+
+class SqoopServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
+    Execute("sqoop version",
+            user = params.smokeuser,
+            logoutput = True
+    )
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py
new file mode 100644
index 0000000..148a833
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py
@@ -0,0 +1,52 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  )
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group
+  )
+  sqoop_TemplateConfig("sqoop-env.sh")
+  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
+          owner = params.sqoop_user,
+          group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+
+def sqoop_TemplateConfig(name, tag=None):
+  import params
+  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
+                  owner = params.sqoop_user,
+                  template_tag = tag
+  )
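
For readers new to the resource DSL: Link(path, to=target) declares a symlink at path pointing at target, which is how the MySQL JDBC driver gets wired into Sqoop's lib directory above. A minimal sketch of the equivalent operation, assuming a hypothetical sqoop_lib of /usr/lib/sqoop/lib:

    import os

    sqoop_lib = '/usr/lib/sqoop/lib'  # hypothetical; the real value comes from params
    # roughly what the Link resource ensures:
    os.symlink('/usr/share/java/mysql-connector-java.jar',
               sqoop_lib + '/mysql-connector-java.jar')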

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py
new file mode 100644
index 0000000..6829557
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from sqoop import sqoop
+
+
+class SqoopClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/templates/sqoop-env.sh.j2
new file mode 100644
index 0000000..90cbc75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/templates/sqoop-env.sh.j2
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
+
+#Set the path for where the zookeeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/global.xml
deleted file mode 100644
index fed9c6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/global.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hcat_conf_dir</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description></description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description></description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description></description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description></description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/metainfo.xml
index 1177cd2..bf08814 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/metainfo.xml
@@ -16,20 +16,50 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>This is comment for WEBHCAT service</comment>
-    <version>0.12.0.2.0.6.1</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>WEBHCAT</name>
+      <comment>This is a comment for the WEBHCAT service</comment>
+      <version>0.12.0.2.1.1</version>
+      <components>
         <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
+          <name>WEBHCAT_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-    <config-type>webhcat-site</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <configuration-dependencies>
+        <config-type>webhcat-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/files/templetonSmoke.sh
new file mode 100644
index 0000000..cefc4f0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/files/templetonSmoke.sh
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+  kinitcmd=""
+fi
+
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0   # NOTE: this early exit leaves the ddl and pig checks below unreachable
+
+#try hcat ddl command
+echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# TODO: confirm whether the pig smoke test can be run in secure mode
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+
+#create, copy post args file
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
+
+#submit pig query
+cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
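
Each check in this script follows the same pattern: curl runs with -w 'http_code <%{http_code}>' so the HTTP status is appended to the captured output, and sed pulls it back out. The same extraction in Python, on a hypothetical captured string:

    import re

    ret_val = '{"status":"ok"} http_code <200>'  # hypothetical curl output
    match = re.search(r'http_code <(\d+)>', ret_val)
    http_exit_code = match.group(1) if match else None  # -> '200'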

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
new file mode 100644
index 0000000..83211e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+webhcat_user = config['configurations']['global']['webhcat_user']
+download_url = config['configurations']['global']['apache_artifacts_download_url']
+
+config_dir = '/etc/hcatalog/conf'
+
+templeton_log_dir = config['configurations']['global']['hcat_log_dir']
+templeton_pid_dir = status_params.templeton_pid_dir
+
+pid_file = status_params.pid_file
+
+hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+hadoop_home = '/usr'
+user_group = config['configurations']['global']['user_group']
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
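
get_kinit_path probes the candidate list for a usable kinit binary, with the default("kinit_path_local", None) entry letting an explicit configuration value win over the well-known locations. A rough sketch of that kind of lookup (illustrative only, not the actual resource_management implementation):

    import os

    def find_kinit(candidate_dirs):
        # return kinit from the first candidate directory that holds it
        for d in candidate_dirs:
            if d and os.path.isfile(os.path.join(d, 'kinit')):
                return os.path.join(d, 'kinit')
        return 'kinit'  # otherwise assume it is resolvable via $PATH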

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/service_check.py
new file mode 100644
index 0000000..58b4d25
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/service_check.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+class WebHCatServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File('/tmp/templetonSmoke.sh',
+         content= StaticFile('templetonSmoke.sh'),
+         mode=0755
+    )
+
+    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+                 " {security_enabled} {kinit_path_local}",
+                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True)
+
+if __name__ == "__main__":
+  WebHCatServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/status_params.py
new file mode 100644
index 0000000..21dde6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
+pid_file = format('{templeton_pid_dir}/webhcat.pid')


[29/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..e6e7fb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g. '1000m')
+  @param xmn_percent: float (e.g. 0.2)
+  @param xmn_max: integer (e.g. 512)
+  """
+  heapsize = int(re.search(r'\d+', heapsize_str).group(0))
+  heapsize_unit = re.search(r'\D+', heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
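
A worked example of calc_xmn_from_xms: for a '1024m' heap with xmn_percent=0.2 and xmn_max=512 (the values the HBASE params.py below passes for the regionserver), floor(1024 * 0.2) = 204, rounding down to a multiple of 8 gives 200, and 200 is under the 512 cap:

    print calc_xmn_from_xms('1024m', 0.2, 512)   # -> '200m'
    print calc_xmn_from_xms('4096m', 0.2, 512)   # capped at xmn_max -> '512m'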

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..95f3e30
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+def hbase(type=None):  # type is 'master', 'regionserver', or 'client'
+  import params
+  
+  Directory( params.conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+  
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "hdfs-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+  
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+      configurations = params.config['configurations']['hbase-policy'],
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  
+  hbase_TemplateConfig( 'hbase-env.sh')     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
+  
+  if type != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory ( [params.tmp_dir, params.log_dir],
+      owner = params.hbase_user,
+      recursive = True
+    )
+
+  if params.log4j_props is not None:
+    PropertiesFile('log4j.properties',
+      dir=params.conf_dir,
+      properties=params.log4j_props,
+      mode=0664,
+      owner=params.hbase_user,
+      group=params.user_group,
+    )
+  elif os.path.exists(format("{params.conf_dir}/log4j.properties")):
+    File(format("{params.conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+
+def hbase_TemplateConfig(name, 
+                         tag=None
+                         ):
+  import params
+
+  TemplateConfig( format("{conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
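
The tag argument to hbase_TemplateConfig selects a tagged template variant: judging by the template file names shipped later in this commit, a template_tag of 'GANGLIA-MASTER' makes TemplateConfig render '<name>-GANGLIA-MASTER.j2' while still writing the untagged file name under conf_dir, so each role gets its own metrics properties:

    # hbase_TemplateConfig('hadoop-metrics2-hbase.properties', tag='GANGLIA-MASTER')
    # renders templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
    # into /etc/hbase/conf/hadoop-metrics2-hbase.properties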

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..0f2a1bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+
+         
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    
+    hbase(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/'
+  stdoutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
+  
+  HbaseClient().execute()
+  
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..dba89fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+
+  if params.hbase_drain_only is True:
+    print "TBD: Remove host from draining"
+  else:
+    kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};") if params.security_enabled else ""
+
+    hosts = params.hbase_excluded_hosts.split(",")
+    for host in hosts:
+      if host:
+        print "TBD: Add host to draining"
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {conf_dir} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..9c78e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+
+         
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='master')
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
+  stroutputf = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
+  
+  HbaseMaster().execute()
+  
+if __name__ == "__main__":
+  HbaseMaster().execute()
+  #main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..2d91e75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='regionserver')
+      
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
+    check_process_status(pid_file)
+    
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseRegionServer().execute()
+  
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..7a1248b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {conf_dir}")
+    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
+    
+    daemon_cmd = None
+    no_op_test = None
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
+
+    if daemon_cmd is not None:
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
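
Concretely, with the paths hard-coded in the HBASE params.py below and a hypothetical hbase_pid_dir of /var/run/hbase, starting the master resolves to the daemon script guarded by a pid-file liveness test, roughly:

    # daemon_cmd: /usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master
    # no_op_test: ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 \
    #               && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1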

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..7db6306
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from functions import calc_xmn_from_xms
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hbase/conf"
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/region_drainer.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+hbase_excluded_hosts = default("/commandParams/excluded_hosts", "")
+hbase_drain_only = default("/commandParams/mark_draining_only", "")
+
+hbase_user = config['configurations']['global']['hbase_user']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+user_group = config['configurations']['global']['user_group']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['global']['hbase_log_dir']
+master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
+master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
+regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts'])  # if hbase_rs_hosts is not given, region servers are assumed to run on the slave nodes
+
+smoke_test_user = config['configurations']['global']['smokeuser']
+smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+service_check_data = get_unique_id_and_date()
+
+if security_enabled:
+  
+  _use_hostname_in_principal = default('instance_name', True)
+  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
+  _hostname = config['hostname']
+  _kerberos_domain = config['configurations']['global']['kerberos_domain']
+  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
+  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
+  
+  if _use_hostname_in_principal:
+    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
+  else:
+    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
+    
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#log4j.properties
+if 'hbase-log4j' in config['configurations']:
+  log4j_props = config['configurations']['hbase-log4j']
+else:
+  log4j_props = None
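
To make the principal wiring above concrete, with hypothetical inputs _master_primary_name='hbase', _hostname='node1.example.com' and _kerberos_domain='EXAMPLE.COM', the hostname-based branch produces:

    # master_jaas_princ == 'hbase/node1.example.com@EXAMPLE.COM'
    # and analogously for regionserver_jaas_princ from _regionserver_primary_name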

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..ff6d0ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
+    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
+  
+    File( '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:    
+      hbase_grant_permissions_file = '/tmp/hbase_grant_permissions.sh'
+      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+      grantprivilegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_permissions_file}")
+
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+      
+      Execute( grantprivilegecmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+def main():
+  import sys
+  command_type = 'perform'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseServiceCheck().execute()
+  
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
new file mode 100644
index 0000000..c9b20ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..2583f44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..9f2b616
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8660
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8660
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8660
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8660

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
new file mode 100644
index 0000000..b8505b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
@@ -0,0 +1,82 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% endif %}
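
The {% if security_enabled %} block above means the JAAS exports only appear
in the rendered hbase-env.sh on secured clusters. A hedged sketch of the two
render paths (all values below are placeholders, not taken from this commit):

    from jinja2 import Template

    tpl = Template(open("hbase-env.sh.j2").read())
    params = dict(java64_home="/usr/jdk64", conf_dir="/etc/hbase/conf",
                  log_dir="/var/log/hbase", pid_dir="/var/run/hbase",
                  master_heapsize="1024m", regionserver_heapsize="1024m",
                  regionserver_xmn_size="512m",
                  client_jaas_config_file="/etc/hbase/conf/hbase_client_jaas.conf",
                  master_jaas_config_file="/etc/hbase/conf/hbase_master_jaas.conf",
                  regionserver_jaas_config_file="/etc/hbase/conf/hbase_regionserver_jaas.conf")

    print tpl.render(security_enabled=False, **params)  # no JAAS exports
    print tpl.render(security_enabled=True, **params)   # JAAS exports appended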

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..61fe62f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file
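
The script above is plain HBase shell input, so a service check only has to
feed the rendered file to the shell. A hypothetical sketch (user, paths, and
command line are assumptions, not taken from this diff):

    import subprocess

    # Run the rendered smoke script through the HBase shell as the smoke user.
    cmd = "hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh"
    subprocess.check_call(["su", "-", "ambari-qa", "-c", cmd])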

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..696718e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,5 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..9102d35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..722cfcc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,8 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..cb9b7b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,8 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};
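
These three JAAS templates pair with the security_enabled block in
hbase-env.sh.j2 above: each daemon is pointed at its own rendered file via
java.security.auth.login.config. A sketch of rendering the regionserver one
(keytab path and principal are illustrative):

    from jinja2 import Template

    jaas = Template(open("hbase_regionserver_jaas.conf.j2").read()).render(
        regionserver_keytab_path="/etc/security/keytabs/hbase.service.keytab",
        regionserver_jaas_princ="hbase/_HOST@EXAMPLE.COM")
    print jaas  # written to the conf dir, then referenced via
                # -Djava.security.auth.login.config in hbase-env.sh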

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
new file mode 100644
index 0000000..b22ae5f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
@@ -0,0 +1,2 @@
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file
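
regionservers.j2 expands to one hostname per line; a quick check of the loop
(host names are made up):

    from jinja2 import Template

    tpl = Template("{% for host in rs_hosts %}{{host}}\n{% endfor %}")
    print tpl.render(rs_hosts=["rs1.example.com", "rs2.example.com"])
    # rs1.example.com
    # rs2.example.com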

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml
deleted file mode 100644
index df5e901..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HCATALOG service</comment>
-    <version>0.12.0.2.0.6.1</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..4fd0b22
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,305 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hadoop.root.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>.</value>
+  </property>
+  <property>
+    <name>hadoop.log.file</name>
+    <value>hadoop.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hadoop.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshhold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.taskid</name>
+    <value>null</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.iscleanup</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.noKeepSplits</name>
+    <value>4</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.totalLogFileSize</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.purgeLogSplits</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.logsRetainHours</name>
+    <value>12</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA</name>
+    <value>org.apache.hadoop.mapred.TaskLogAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.taskId</name>
+    <value>${hadoop.tasklog.taskid}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.isCleanup</name>
+    <value>${hadoop.tasklog.iscleanup}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.totalLogFileSize</name>
+    <value>${hadoop.tasklog.totalLogFileSize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.security.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.maxfilesize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.maxbackupindex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.category.SecurityLogger</name>
+    <value>${hadoop.security.logger}</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.file</name>
+    <value>SecurityAuth.audit</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.File</name>
+    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.File</name>
+    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxFileSize</name>
+    <value>${hadoop.security.log.maxfilesize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxBackupIndex</name>
+    <value>${hadoop.security.log.maxbackupindex}</value>
+  </property>
+  <property>
+    <name>hdfs.audit.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
+    <value>${hdfs.audit.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.File</name>
+    <value>${hadoop.log.dir}/hdfs-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>mapred.audit.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.mapred.AuditLogger</name>
+    <value>${mapred.audit.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.mapred.AuditLogger</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.File</name>
+    <value>${hadoop.log.dir}/mapred-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.File</name>
+    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxFileSize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxBackupIndex</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.metrics.log.level</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.metrics2</name>
+    <value>${hadoop.metrics.log.level}</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service</name>
+    <value>ERROR</value>
+  </property>
+  <property>
+    <name>log4j.appender.NullAppender</name>
+    <value>org.apache.log4j.varia.NullAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.log.metrics.EventCounter</value>
+  </property>
+
+</configuration>
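
hdfs-log4j.xml stores each log4j key as an Ambari property; at deploy time
the <name>/<value> pairs are flattened back into plain key=value lines in
log4j.properties. A rough sketch of that flattening (dict abbreviated):

    log4j_props = {
        "hadoop.root.logger": "INFO,console",
        "log4j.appender.RFA.MaxFileSize": "256MB",
        # ... remaining properties from the XML above ...
    }
    lines = "".join("%s=%s\n" % (k, v) for k, v in sorted(log4j_props.items()))
    open("/etc/hadoop/conf/log4j.properties", "w").write(lines)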

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
index aee8236..296b0c1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
@@ -22,7 +22,7 @@
 
 <configuration>
 
-<!-- file system properties -->
+  <!-- file system properties -->
 
   <property>
     <name>dfs.namenode.name.dir</name>
@@ -60,11 +60,11 @@
     <name>dfs.datanode.data.dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
     <final>true</final>
   </property>
 
@@ -72,21 +72,21 @@
     <name>dfs.hosts.exclude</name>
     <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
   </property>
 
-<!--
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
--->
+  <!--
+    <property>
+      <name>dfs.hosts</name>
+      <value>/etc/hadoop/conf/dfs.include</value>
+      <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
+    </property>
+  -->
 
   <property>
     <name>dfs.namenode.checkpoint.dir</name>
@@ -130,14 +130,14 @@
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
-  </description>
+    </description>
   </property>
 
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
-  </description>
+    </description>
   </property>
 
   <property>
@@ -156,21 +156,21 @@
     <name>dfs.namenode.safemode.threshold-pct</name>
     <value>1.0f</value>
     <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.namenode.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.namenode.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
   </property>
 
   <property>
     <name>dfs.datanode.balance.bandwidthPerSec</name>
     <value>6250000</value>
     <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in term of
+      the number of bytes per second.
+    </description>
   </property>
 
   <property>
@@ -222,113 +222,113 @@
   <property>
     <name>dfs.namenode.http-address</name>
     <value>localhost:50070</value>
-<description>The name of the default file system.  Either the
-literal string "local" or a host:port for NDFS.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.transfer.threads</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>fs.permissions.umask-mode</name>
-<value>022</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.enabled</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.superusergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
+    <description>The name of the default file system.  Either the
+      literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
     <description>
-        Kerberos principal name for the secondary NameNode.
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
     </description>
   </property>
 
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
 
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
     <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
 
   </property>
 
@@ -368,64 +368,64 @@ Kerberos principal name for the NameNode
   <property>
     <name>dfs.datanode.kerberos.principal</name>
     <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
   </property>
 
   <property>
     <name>dfs.namenode.keytab.file</name>
     <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
+    <description>
+      Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
     <value></value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
+    <description>
+      Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.datanode.keytab.file</name>
     <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
+    <description>
+      The filename of the keytab file for the DataNode.
     </description>
   </property>
 
   <property>
     <name>dfs.namenode.https-address</name>
     <value>localhost:50470</value>
-  <description>The https address where namenode binds</description>
+    <description>The https address where namenode binds</description>
 
   </property>
 
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-<description>The permissions that should be there on dfs.datanode.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.datanode.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
+    <description>The permissions that should be there on dfs.datanode.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.datanode.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
   </property>
 
   <property>
     <name>dfs.namenode.accesstime.precision</name>
     <value>0</value>
     <description>The access time for HDFS file is precise upto this value.
-                 The default value is 1 hour. Setting a value of 0 disables
-                 access times for HDFS.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
     </description>
   </property>
 
   <property>
-   <name>dfs.cluster.administrators</name>
-   <value> hdfs</value>
-   <description>ACL for who all can view the default servlets in the HDFS</description>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
   </property>
 
   <property>
@@ -458,14 +458,14 @@ don't exist, they will be created with this permission.</description>
     <value>30000</value>
     <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
   </property>
-  
+
   <property>
     <name>dfs.journalnode.http-address</name>
     <value>0.0.0.0:8480</value>
     <description>The address and port the JournalNode web UI listens on.
-     If the port is 0 then the server will start on a free port. </description>
+      If the port is 0 then the server will start on a free port. </description>
   </property>
-  
+
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/grid/0/hdfs/journal</value>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
index 2ca6c10..393d167 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
@@ -16,45 +16,138 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>2.2.0.2.0.6.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.1.1</version>
 
-    <components>
+      <components>
         <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-        
+
         <component>
-            <name>JOURNALNODE</name>
-            <category>MASTER</category>
+          <name>JOURNALNODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
           <name>ZKFC</name>
           <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>hdfs-site</config-type>
-      <config-type>hadoop-policy</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>
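
Every <commandScript> in the metainfo above names a Python Script subclass;
the DataNode script added later in this commit follows exactly this shape. A
stripped-down sketch of the contract (the JournalNode mirror below is
hypothetical; only the method names are taken from the diff):

    from resource_management import Script

    class JournalNode(Script):
      def install(self, env):
        self.install_packages(env)
      def configure(self, env):
        pass  # write out configs
      def start(self, env):
        self.configure(env)  # configure, then launch the daemon
      def stop(self, env):
        pass
      def status(self, env):
        pass

    if __name__ == "__main__":
      JournalNode().execute()  # dispatches to the method named by the command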

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
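
For readers skimming the shell above: the core test is just "is every
configured name dir empty?". An equivalent Python sketch (the function name
is made up):

    import os

    def non_empty_name_dirs(name_dirs):
        # mirrors the `ls $dir | wc -l | grep -q ^0$` loop above
        return [d for d in name_dirs.split(",")
                if os.path.isdir(d) and os.listdir(d)]

    # formatting is attempted only when this comes back empty
    print non_empty_name_dirs("/hadoop/hdfs/namenode")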

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
new file mode 100644
index 0000000..f8e9c1a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to send a specific URL path with the request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+      
+
+if __name__ == "__main__":
+  main()
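
A usage note for the checker above: it prints the failing URL and exits 1 on
the first web UI that does not answer HTTP 200, so callers can gate on the
return code. Hypothetical invocation (hosts are illustrative; 8480 is the
JournalNode web UI port from hdfs-site.xml above):

    # python checkWebUI.py -m jn1.example.com,jn2.example.com -p 8480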

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..57fdb35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_datanode import datanode
+
+
+class DataNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    datanode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    datanode(action="stop")
+
+  def configure(self, env):
+    import params
+
+    datanode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+
+if __name__ == "__main__":
+  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..ec24c7d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,52 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class HdfsClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def config(self, env):
+    import params
+
+    pass
+
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..f7d9f15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+def datanode(action=None):
+  import params
+
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              recursive=True,
+              mode=0750,
+              owner=params.hdfs_user,
+              group=params.user_group)
+    for data_dir in params.dfs_data_dir.split(","):
+      Directory(data_dir,
+                recursive=True,
+                mode=0755,
+                owner=params.hdfs_user,
+                group=params.user_group)
+
+  if action == "start":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )
+  if action == "stop":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )


[41/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2
deleted file mode 100644
index 3530444..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
-group   notConfigGroup v1           notConfigUser
-group   notConfigGroup v2c           notConfigUser
-view    systemview    included   .1
-access  notConfigGroup ""      any       noauth    exact  systemview none none
-
-syslocation Hadoop 
-syscontact HadoopMaster 
-dontLogTCPWrappersConnects yes
-
-###############################################################################
-# disk checks
-
-disk / 10000
-
-
-###############################################################################
-# load average checks
-#
-
-# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
-#
-# 1MAX:   If the 1 minute load average is above this limit at query
-#         time, the errorFlag will be set.
-# 5MAX:   Similar, but for 5 min average.
-# 15MAX:  Similar, but for 15 min average.
-
-# Check for loads:
-#load 12 14 14
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2
deleted file mode 100644
index d01d37e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/metainfo.xml
index ca45822..06361d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/metainfo.xml
@@ -19,4 +19,5 @@
     <versions>
 	  <active>true</active>
     </versions>
+    <extends>1.3.2</extends>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/configuration/global.xml
deleted file mode 100644
index f1fa4de..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/configuration/global.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml
deleted file mode 100644
index bebb54e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Flume is a distributed, reliable, and available system for efficiently collecting, aggregating and moving large amounts of log data from many different sources to a centralized data store.</comment>
-    <version>1.3.1.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>FLUME_SERVER</name>
-            <category>MASTER</category>
-            <cardinality>1</cardinality>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/configuration/global.xml
deleted file mode 100644
index 16df0b8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/configuration/global.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <description>User to run gmetad as</description>
-  </property>
-  <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <description>User to run gmond as</description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 36381ce..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,113 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>GANGLIA</name>
-      <comment>Ganglia Metrics Collection system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>GANGLIA_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/ganglia_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>GANGLIA_MONITOR</name>
-          <category>SLAVE</category>
-          <cardinality>ALL</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/ganglia_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>libganglia-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmetad-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-web-3.5.7-99.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>python-rrdtool.x86_64</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-modules-python-3.5.0-99</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <package>
-            <type>rpm</type>
-            <name>apache2</name>
-          </package>
-          <package>
-            <type>rpm</type>
-            <name>apache2-mod_php5</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <package>
-            <type>rpm</type>
-            <name>httpd</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <package>
-            <type>rpm</type>
-            <name>httpd</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>
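
Each <commandScript> above names a python script run by the Ambari agent. As a sketch of the shape such a script takes (method bodies and the service commands are illustrative assumptions, not code from this patch):

    # Sketch of a component command script; Script comes from Ambari's
    # resource_management library, and execute() dispatches on the command.
    from resource_management import Script, Execute

    class GangliaServer(Script):
      def install(self, env):
        self.install_packages(env)     # pulls the <packages> from metainfo.xml

      def configure(self, env):
        pass                           # write gmetad.conf etc. here

      def start(self, env):
        self.configure(env)
        Execute("service hdp-gmetad start")   # illustrative command

      def stop(self, env):
        Execute("service hdp-gmetad stop")

      def status(self, env):
        Execute("service hdp-gmetad status")

    if __name__ == "__main__":
      GangliaServer().execute()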

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh
deleted file mode 100644
index e60eb31..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi
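
The check scripts above all follow the same pattern: read the logged pid, then probe the process table for it. The equivalent check in python, with the pid-file path as an assumption:

    # Liveness check: a pid file is only trusted if its process still exists.
    import os

    def is_running(pid_file):
      try:
        pid = int(open(pid_file).read().strip())
      except (IOError, ValueError):
        return False
      try:
        os.kill(pid, 0)   # signal 0 probes for existence without killing
        return True
      except OSError:
        return False

    print(is_running("/var/run/ganglia/hdp/gmetad.pid"))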

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh
deleted file mode 100644
index 0cec8dc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh
deleted file mode 100644
index d94db5d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init
deleted file mode 100644
index 20b388e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh
deleted file mode 100644
index e28610e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep the daemon in the foreground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port address2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is assumed.
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-# rrd_rootdir "/some/other/place"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a case
-# sensitive manner
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}
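
The core of generateGmetadConf is the loop that turns each configured Ganglia cluster into one data_source line. Reduced to python with sample cluster tuples standing in for getGangliaClusterInfo:

    # Emit one gmetad data_source line per (cluster, master_ip, port).
    clusters = [("HDPNameNode", "10.0.0.1", 8661),   # sample data
                ("HDPSlaves",   "10.0.0.1", 8660)]

    for name, master_ip, port in clusters:
        print('data_source "%s" %s:%s' % (name, master_ip, port))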

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init
deleted file mode 100644
index afb7026..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh
deleted file mode 100644
index 87da4dd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh
+++ /dev/null
@@ -1,545 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = localhost
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host total memory every
-   180 secs.
-   This information doesn't change between reboots and is only collected
-   once. This information needed for heatmap showing */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In honesty, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-/* The gmond cluster master must additionally provide an XML 
- * description of the cluster to the gmetad that will query it.
- */
-tcp_accept_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
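
The master/slave split above comes down to which channels each gmond gets: a master receives over UDP and serves its XML state over TCP, while a slave only sends. A condensed python rendering of that asymmetry, with sample values:

    # Condensed form of generateGmondMasterConf/generateGmondSlaveConf.
    def gmond_channels(master_ip, port, is_master):
      if is_master:
        # Masters only receive, and expose XML to gmetad over TCP.
        return ("udp_recv_channel {\n  bind = %s\n  port = %s\n}\n"
                "tcp_accept_channel {\n  bind = %s\n  port = %s\n}\n"
                % (master_ip, port, master_ip, port))
      # Slaves only send.
      return ("udp_send_channel {\n  host = %s\n  port = %s\n  ttl = 1\n}\n"
              % (master_ip, port))

    print(gmond_channels("10.0.0.1", 8660, False))   # sample values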

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py
deleted file mode 100644
index 3fe6901..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-import os
-import rrdtool
-import sys
-import time
-import re
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end,
-                resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
-    value = None
-    idx = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx -= 1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-
-def stripList(l):
-  return ([x.strip() for x in l])
-
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-
-def _walk(*args, **kwargs):
-  for root, dirs, files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root, dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    #Process only path which contains files. If no host parameter passed - process all hosts folders and summary info
-    #If host parameter passed - process only this host folder
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          #Exact name of metric
-          printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                      os.path.join(path, file), cf, start, end, resolution,
-                      pointInTime)
-        else:
-          #Regex as metric name
-          metricRegex = metric + '\.rrd$'
-          p = re.compile(metricRegex)
-          matchedFiles = filter(p.match, files)
-          for matchedFile in matchedFiles:
-            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
-                        os.path.join(path, matchedFile), cf, start, end,
-                        resolution, pointInTime)
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()
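
rrd.py answers GET or POST requests whose query parameters (c, h, m, s, e, r, cf, pt) it parses itself, and frames each metric in the response with [~EOM]. A minimal python 2 client, with the host and CGI mount point assumed; note the first chunk also carries the global start timestamp:

    # Fetch the last hour of load_one for one cluster and split the framed
    # response; URL is an assumption, parameter names come from rrd.py itself.
    import urllib

    query = urllib.urlencode({"c": "HDPSlaves", "m": "load_one",
                              "cf": "AVERAGE", "s": "-3600", "e": "now"})
    body = urllib.urlopen("http://ganglia-host/cgi-bin/rrd.py?" + query).read()
    for record in body.split("[~EOM]\n")[:-1]:
        print(record)   # ds_name, cluster, host, metric, time, step, values...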

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh
deleted file mode 100644
index 8b7c257..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh
deleted file mode 100644
index 5145b9c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi
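
The getopts loop above maps one-to-one onto argparse in Python. A sketch of the same -c/-m/-t/-o/-g interface; instantiate_gmetad_conf and instantiate_gmond_conf are hypothetical stand-ins for the shell functions of the same names:

    import argparse
    import sys

    def main(argv=None):
        parser = argparse.ArgumentParser(
            description="Generate Ganglia gmond/gmetad configuration.")
        parser.add_argument("-c", dest="gmond_cluster_name",
                            help="Ganglia cluster to generate gmond config for")
        parser.add_argument("-m", dest="is_master_gmond", action="store_true",
                            help="generate master rather than slave gmond config")
        parser.add_argument("-t", dest="configure_gmetad", action="store_true",
                            help="generate gmetad config (takes precedence over -c)")
        parser.add_argument("-o", dest="owner", default="root")
        parser.add_argument("-g", dest="group", default="root")
        args = parser.parse_args(argv)

        if args.configure_gmetad:
            instantiate_gmetad_conf()  # hypothetical helper
        elif args.gmond_cluster_name:
            instantiate_gmond_conf(args.gmond_cluster_name,  # hypothetical helper
                                   args.is_master_gmond, args.owner, args.group)
        else:
            parser.print_usage()
            sys.exit(2)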

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh
deleted file mode 100644
index ab5102d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi
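
Every start script in this package repeats the same idiom: bail out if a PID is already live, launch the daemon, then poll for up to six seconds before declaring failure. A generic Python sketch of that pattern, assuming a get_running_pid callable like the one sketched after rrdcachedLib.sh:

    import subprocess
    import time

    def start_daemon(cmd, get_running_pid, attempts=6, delay=1):
        """Launch cmd, then poll until the daemon's PID appears or we give up."""
        pid = get_running_pid()
        if pid is not None:
            return pid  # already running, nothing to do
        subprocess.call(cmd)  # the daemon backgrounds itself
        for _ in range(attempts):
            pid = get_running_pid()
            if pid is not None:
                return pid
            time.sleep(delay)
        raise RuntimeError("failed to start: %s" % " ".join(cmd))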

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh
deleted file mode 100644
index 239b62e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi
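
The one addition over startGmetad.sh is the fan-out: with no cluster argument, a gmond is started for every configured cluster. The same dispatch in Python, with get_configured_cluster_names and start_gmond_for_cluster as hypothetical counterparts of the shell functions:

    import sys

    def main(argv):
        if len(argv) > 1 and argv[1]:
            start_gmond_for_cluster(argv[1])  # just the cluster asked for
        else:
            for name in get_configured_cluster_names():  # all known clusters
                start_gmond_for_cluster(name)

    if __name__ == "__main__":
        main(sys.argv)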

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
deleted file mode 100644
index e79472b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    #changed because problem puppet had with nobody user
-    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-    #         -b /var/lib/ganglia/rrds -B
-    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
-    # this, but it doesn't take sometimes due to a lack of permissions,
-    # so perform the operation explicitly to be super-sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi
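
Two details above are easy to miss: rrdcached is launched via su because of the puppet problem with the nobody user noted in the comment, and the sockets are explicitly chgrp'ed because the daemon's own -s option "doesn't take sometimes". A Python sketch of the same two steps; paths, user, and group are parameters rather than anything this stack defines:

    import grp
    import os
    import subprocess

    def start_rrdcached(user, rrdcached_cmd, sockets, webserver_group):
        """Start rrdcached as `user`, then hand its sockets to the web server group."""
        subprocess.check_call(["su", "-", user, "-c", rrdcached_cmd])
        gid = grp.getgrnam(webserver_group).gr_gid
        for sock in sockets:
            os.chown(sock, -1, gid)  # chgrp: uid -1 keeps the current owner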

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
deleted file mode 100644
index 2764e0e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
deleted file mode 100644
index 1af3eb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
deleted file mode 100644
index 0a0d8d8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
deleted file mode 100644
index b27f7a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
deleted file mode 100644
index 52b79be..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import os
-
-
-def groups_and_users():
-  import params
-
-  Group(params.user_group)
-  Group(params.gmetad_user)
-  Group(params.gmond_user)
-  User(params.gmond_user,
-       groups=[params.gmond_user])
-  User(params.gmetad_user,
-       groups=[params.gmetad_user])
-
-
-def config():
-  import params
-
-  shell_cmds_dir = params.ganglia_shell_cmds_dir
-  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
-                 'gmondLib.sh', 'rrdcachedLib.sh',
-                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
-                 'startRrdcached.sh', 'stopGmetad.sh',
-                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
-  Directory(shell_cmds_dir,
-            owner="root",
-            group="root",
-            recursive=True
-  )
-  init_file("gmetad")
-  init_file("gmond")
-  for sh_file in shell_files:
-    shell_file(sh_file)
-  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
-    ganglia_TemplateConfig(conf_file)
-
-
-def init_file(name):
-  import params
-
-  File("/etc/init.d/hdp-" + name,
-       content=StaticFile(name + ".init"),
-       mode=0755
-  )
-
-
-def shell_file(name):
-  import params
-
-  File(params.ganglia_shell_cmds_dir + os.sep + name,
-       content=StaticFile(name),
-       mode=0755
-  )
-
-
-def ganglia_TemplateConfig(name, mode=0755, tag=None):
-  import params
-
-  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
-                 owner="root",
-                 group="root",
-                 template_tag=tag,
-                 mode=mode
-  )
-
-
-def generate_daemon(ganglia_service,
-                    name=None,
-                    role=None,
-                    owner=None,
-                    group=None):
-  import params
-
-  cmd = ""
-  if ganglia_service == "gmond":
-    if role == "server":
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
-    else:
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
-  elif ganglia_service == "gmetad":
-    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
-  else:
-    raise Fail("Unexpected ganglia service")
-  Execute(format(cmd),
-          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
-                "/sbin", "/usr/local/bin", "/bin", "/usr/bin"]
-  )
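
For context, generate_daemon above shells out to setupGanglia.sh with the flags documented in that script's usage text. A hypothetical call from a service script, with the cluster name and group purely illustrative:

    import ganglia

    # Master gmond for a cluster named "HDPSlaves" (role="server" adds -m).
    ganglia.generate_daemon("gmond", name="HDPSlaves", role="server",
                            owner="root", group="hadoop")

    # gmetad config generation ignores the cluster name entirely (the -t path).
    ganglia.generate_daemon("gmetad", owner="root", group="hadoop")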


[10/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index 695a7ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,131 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters, i.e. it controls the number of concurrently
-      running applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues' capacities should add up to no more than their
-      parent queue's capacity.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit, a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
-    <value>50</value>
-    <description>
-      No description
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to the number of nodes in the cluster. The
-      default of 40 approximates the number of nodes in one rack.
-    </description>
-  </property>
-
-
-</configuration>
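
The invariant stated under yarn.scheduler.capacity.root.capacity (children may not claim more than their parent's capacity) is mechanical to check. A sketch over a flat dict of the properties above; it encodes the rule exactly as the description words it:

    def check_child_capacities(props, parent="root"):
        """Verify a parent's direct children don't over-commit its capacity."""
        prefix = "yarn.scheduler.capacity.%s." % parent
        children = [c.strip() for c in props.get(prefix + "queues", "").split(",") if c.strip()]
        total = sum(float(props[prefix + "%s.capacity" % c]) for c in children)
        parent_capacity = float(props[prefix + "capacity"])
        if total > parent_capacity:
            raise ValueError("children of %s claim %.1f%% of a %.1f%% queue"
                             % (parent, total, parent_capacity))

    # With the values above: root is 100 and its one child (default) is 100 -- OK.
    check_child_capacities({
        "yarn.scheduler.capacity.root.queues": "default",
        "yarn.scheduler.capacity.root.capacity": "100",
        "yarn.scheduler.capacity.root.default.capacity": "100",
    })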

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml
deleted file mode 100644
index 429c39f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>rm_host</name>
-    <value></value>
-    <description>ResourceManager.</description>
-  </property>
-  <property>
-    <name>nm_hosts</name>
-    <value></value>
-    <description>List of NodeManager Hosts.</description>
-  </property>
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <description>YARN Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <description>YARN PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <value>yarn</value>
-    <description>YARN User</description>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <description>Max heap size for all YARN components, as a numerical value in MB</description>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heap size for the ResourceManager, as a numerical value in MB</description>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heap size for the NodeManager, as a numerical value in MB</description>
-  </property>
-
-  <!--MAPREDUCE2-->
-
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>Mapreduce User</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
deleted file mode 100644
index 9389ed0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-    <description> Comma separated list of user and group names that are allowed
-      to submit jobs to the 'default' queue. The user list and the group list
-      are separated by a blank, for example: alice,bob group1,group2.
-      If set to the special value '*', it means all users are allowed to
-      submit jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-    <description> Comma separated list of user and group names that are allowed
-      to delete jobs or modify job's priority for jobs not owned by the current
-      user in the 'default' queue. The user list and the group list
-      are separated by a blank, for example: alice,bob group1,group2.
-      If set to the special value '*', it means all users are allowed to do
-      this operation.
-    </description>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
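
The ACL value format described above (comma-separated users, one blank, comma-separated groups, with '*' meaning everyone) parses in a few lines. A sketch:

    def parse_queue_acl(value):
        """Parse 'alice,bob group1,group2' into (users, groups); '*' allows everyone."""
        value = value.strip()
        if value == "*":
            return None, None  # wildcard: all users allowed
        users_part, _, groups_part = value.partition(" ")
        users = [u for u in users_part.split(",") if u]
        groups = [g for g in groups_part.split(",") if g]
        return users, groups

    print(parse_queue_acl("alice,bob group1,group2"))  # (['alice', 'bob'], ['group1', 'group2'])
    print(parse_queue_acl("*"))                        # (None, None)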

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
deleted file mode 100644
index 0d22a65..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
+++ /dev/null
@@ -1,386 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than 0.5.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-  </property>
-
-<!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory, relative to the maximum heap size, to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-    <description>
-      Should the outputs of the maps be compressed before being sent across the network? Uses SequenceFile compression.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx312m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      These will appear before the opts set by yarn.app.mapreduce.am.command-opts,
-      and thus their options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
-    <description>
-      User added environment variables for the MR App Master
-      processes. Example :
-      1) A=foo  This will set the env variable A to foo
-      2) B=$B:c This inherits the tasktracker's B env variable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for map tasks.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for reduce tasks.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting. It should not be larger than the global number
-      set by the resourcemanager; otherwise, it will be overridden. The default is
-      set to 2, to allow at least one retry for the AM.
-    </description>
-  </property>
-
-
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-  </property>
-
-</configuration>
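
One relationship in this file is worth spelling out: each -Xmx above is sized below its container (756m inside 1024 MB tasks, 312m inside the 512 MB AM), leaving headroom for non-heap JVM memory. A sketch of that sizing rule; the fractions are inferred from these values, not something the stack definition states:

    def child_java_opts(container_mb, heap_fraction=0.75):
        """Derive a child JVM -Xmx from its container size, keeping headroom
        for non-heap memory (thread stacks, code cache, direct buffers)."""
        return "-Xmx%dm" % int(container_mb * heap_fraction)

    print(child_java_opts(1024))                     # -Xmx768m, near the configured -Xmx756m
    print(child_java_opts(512, heap_fraction=0.61))  # -Xmx312m, matching the AM opts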

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-log4j.xml
deleted file mode 100644
index 879cf03..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-log4j.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hadoop.mapreduce.jobsummary.logger</name>
-    <value>${hadoop.root.logger}</value>
-  </property>
-  <property>
-    <name>hadoop.mapreduce.jobsummary.log.file</name>
-    <value>hadoop-mapreduce.jobsummary.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.JSA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.JSA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.JSA.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.JSA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.JSA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-
-  <!--Only for HDP2.0 below-->
-  <property>
-    <name>yarn.server.resourcemanager.appsummary.log.file</name>
-    <value>hadoop-mapreduce.jobsummary.log</value>
-  </property>
-  <property>
-    <name>yarn.server.resourcemanager.appsummary.logger</name>
-    <value>${hadoop.root.logger}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RMSUMMARY</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RMSUMMARY.File</name>
-    <value>/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RMSUMMARY.MaxFileSize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>log4j.appender.RMSUMMARY.MaxBackupIndex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.appender.RMSUMMARY.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RMSUMMARY.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary</name>
-    <value>${yarn.server.resourcemanager.appsummary.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary</name>
-    <value>false</value>
-  </property>
-  <!--Only for HDP2.0 above-->
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
deleted file mode 100644
index 9bfd9ee..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,342 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- ResourceManager -->
-
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-    <description> The address of ResourceManager. </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>true</value>
-    <description>Whether ACLs are enabled.</description>
-  </property>
-
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>*</value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-  </property>
-
-  <!-- NodeManager -->
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio between virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and
-      cannot start with a number.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-    <description>The auxiliary service class to use </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_${contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, for which the node manager
-      waits  between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log.retain-seconds</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>Location to aggregate logs to. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose YARN application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of YARN applications' work directories are configurable with
-      the yarn.nodemanager.local-dirs property (see below), and the roots
-      of YARN applications' log directories are configurable with the
-      yarn.nodemanager.log-dirs property (see also below).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregated logs before deleting them, in seconds.
-      -1 disables deletion. Be careful: setting this too small will spam the NameNode.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of disks that must be healthy for the nodemanager
-      to launch new containers. This corresponds to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs; if fewer
-      healthy local-dirs (or log-dirs) are available, new containers will
-      not be launched on this node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for the AM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/etc/hadoop/conf/yarn.exclude</value>
-    <description>
-      Names a file that contains a list of hosts that are
-      not permitted to connect to the resource manager.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.
-    </description>
-  </property>
-
-</configuration>
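These stack defaults reach the Python command scripts through Script.get_config(), keyed
by config type. A minimal sketch of the lookup pattern the params.py files in this patch
use; the variable names here are illustrative:

    # Sketch: reading yarn-site values inside a command script's params.py.
    from resource_management import *

    config = Script.get_config()
    yarn_site = config['configurations']['yarn-site']
    rm_webapp_address = yarn_site['yarn.resourcemanager.webapp.address']
    nm_local_dirs = yarn_site['yarn.nodemanager.local-dirs'].split(',')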

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml
deleted file mode 100644
index a5a1e94..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,174 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.1.1</version>
-      <components>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/resourcemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>NODEMANAGER</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/nodemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>YARN_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/yarn_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn-nodemanager</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-mapreduce</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn-proxyserver</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn-resourcemanager</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>capacity-scheduler</config-type>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-queue-acls</config-type>
-        <config-type>yarn-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>MAPREDUCE2</name>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HISTORYSERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MAPREDUCE2_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/mapreduce2_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-mapreduce</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-mapreduce-historyserver</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/mapred_service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>capacity-scheduler</config-type>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-queue-acls</config-type>
-        <config-type>yarn-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-  </services>
-</metainfo>
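For a quick way to inspect a metainfo.xml like the one above, the component list can be
pulled out with the standard library's ElementTree; this parser is only an illustration,
not part of Ambari:

    # Example: list component names and categories from a metainfo.xml.
    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")
    for comp in tree.findall(".//component"):
        print "%s (%s)" % (comp.findtext("name"), comp.findtext("category"))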


[33/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-log4j.xml
deleted file mode 100644
index 169085c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-log4j.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>log4j.appender.oozie</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.oozie.DatePattern</name>
-    <value>.yyyy-MM-dd-HH</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.File</name>
-    <value>${oozie.log.dir}/oozie.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozie.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-
-  <property>
-    <name>log4j.appender.oozieops</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.File</name>
-    <value>${oozie.log.dir}/oozie-ops.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieops.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.File</name>
-    <value>${oozie.log.dir}/oozie-instrumentation.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieinstrumentation.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.File</name>
-    <value>${oozie.log.dir}/oozie-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.oozieaudit.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.File</name>
-    <value>${oozie.log.dir}/oozie-jpa.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.Append</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.openjpa.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.logger.openjpa</name>
-    <value>INFO, openjpa</value>
-  </property>
-  <property>
-    <name>log4j.logger.oozieops</name>
-    <value>INFO, oozieops</value>
-  </property>
-  <property>
-    <name>log4j.logger.oozieinstrumentation</name>
-    <value>ALL, oozieinstrumentation</value>
-  </property>
-  <property>
-    <name>log4j.logger.oozieaudit</name>
-    <value>ALL, oozieaudit</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.oozie</name>
-    <value>INFO, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop</name>
-    <value>WARN, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.mortbay</name>
-    <value>WARN, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.hsqldb</name>
-    <value>WARN, oozie</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.security.authentication.server</name>
-    <value>INFO, oozie</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index d8bb062..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,253 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.authorization.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled, any user can manage the Oozie system and any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means infinite timeout.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie database name.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
-      #AUTHENTICATION_HANDLER_CLASSNAME#.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted NameNode for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
-      within the Oozie configuration directory; the path can also be absolute (i.e.
-      pointing to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor
-    </value>
-    <description>
-      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
-      be used in workflows. This property is a convenience property to add extensions to the built in executors without
-      having to include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>
-      shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
-    </value>
-    <description>
-      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a one-space string; the service
-      trims the value, and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates Oozie DB.
-
-      If set to true, it creates the DB schema if it does not exist; if the schema already exists, this is a no-op.
-      If set to false, it does not create the DB schema, and startup fails if the schema does not exist.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      DB user name.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value></value>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-      and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-</configuration>
\ No newline at end of file
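The JPAService JDBC settings above are what the Oozie scripts later in this patch feed
into the DBConnectionVerification pre-flight check. A hedged sketch of how the pieces
combine; the variable names mirror params.py, but the snippet itself is illustrative:

    # Sketch: assemble the DB connection check from oozie-site values,
    # mirroring the format() call in oozie_service.py further down.
    # java_home, check_db_connection_jar and jdbc_driver_jar come from params.py.
    oozie_site = config['configurations']['oozie-site']
    jdbc_url = oozie_site['oozie.service.JPAService.jdbc.url']
    jdbc_user = oozie_site['oozie.service.JPAService.jdbc.username']
    jdbc_passwd = oozie_site['oozie.service.JPAService.jdbc.password']
    jdbc_driver = oozie_site['oozie.service.JPAService.jdbc.driver']
    cmd = ("%s/bin/java -cp %s:%s org.apache.ambari.server.DBConnectionVerification "
           "%s %s %s %s" % (java_home, check_db_connection_jar, jdbc_driver_jar,
                            jdbc_url, jdbc_user, jdbc_passwd, jdbc_driver))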

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml
deleted file mode 100644
index c473435..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,114 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>OOZIE</name>
-      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>3.3.2.1.3.3.0</version>
-      <components>
-        <component>
-          <name>OOZIE_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>OOZIE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>oozie.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>oozie-client.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>extjs-2.2-1</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>oozie-site</config-type>
-        <config-type>oozie-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh
deleted file mode 100644
index 2446544..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the couner and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-      elif [ "SUCCEEDED" == "$act_status" ]; then
-        rc=0;
-        break;
-      else
-        rc=1
-        break;
-      fi
-    done
-    return $rc
-}
-
-export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export kinit_path_local=$6
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-su - ${smoke_test_user} -c "hadoop dfs -rmr examples"
-su - ${smoke_test_user} -c "hadoop dfs -rmr input-data"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id"
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE
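getValueFromField above scrapes a Hadoop *-site.xml with xmllint and grep, which is
fragile if the <name> and <value> tags drift more than two lines apart. The same lookup
in Python, shown only as an illustration, is more robust:

    # Illustrative Python equivalent of getValueFromField: return the
    # <value> for a given <name> in a Hadoop-style *-site.xml.
    import xml.etree.ElementTree as ET

    def get_value_from_field(xml_path, prop_name):
        for prop in ET.parse(xml_path).findall(".//property"):
            if prop.findtext("name") == prop_name:
                return prop.findtext("value")
        return None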

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh
deleted file mode 100644
index 97a513c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
-EC=$?
-echo $OUT
-GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
-if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
-then
-  exit 0
-else
-  exit $EC
-fi  
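wrap_ooziedb.sh deliberately maps the "DB schema exists" failure to exit 0 so that
re-running ooziedb.sh stays idempotent. The same guard written in Python, purely as a
sketch (the create arguments shown are the ones the service script passes):

    # Sketch: treat "DB schema exists" as success, propagate any other failure.
    import subprocess, sys

    proc = subprocess.Popen(
        ["/usr/lib/oozie/bin/ooziedb.sh", "create", "-sqlfile", "oozie.sql", "-run"],
        cwd="/var/tmp/oozie", stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = proc.communicate()[0]
    print out
    if proc.returncode != 0 and "DB schema exists" in out:
        sys.exit(0)
    sys.exit(proc.returncode)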

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py
deleted file mode 100644
index e1a7869..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import os
-from resource_management import *
-
-def oozie(is_server=False):
-  import params
-
-  XmlConfig( "oozie-site.xml",
-    conf_dir = params.conf_dir, 
-    configurations = params.config['configurations']['oozie-site'],
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0664
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
-  )
-
-  if (params.log4j_props != None):
-    PropertiesFile('oozie-log4j.properties',
-                   dir=params.conf_dir,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.oozie_user,
-                   group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
-    File(format("{params.conf_dir}/oozie-log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.oozie_user
-    )
-
-  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
-    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
-     -o {check_db_connection_jar_name}'"),
-      not_if  = format("[ -f {check_db_connection_jar} ]")
-    )
-    
-  oozie_ownership( )
-  
-  if is_server:      
-    oozie_server_specific( )
-  
-def oozie_ownership():
-  import params
-  
-  File ( format("{conf_dir}/adminusers.txt"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/hadoop-config.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/oozie-default.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  Directory ( format("{conf_dir}/action-conf"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/action-conf/hive.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-def oozie_server_specific():
-  import params
-  
-  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
-  Directory( oozie_server_directories,
-    owner = params.oozie_user,
-    mode = 0755,
-    recursive = True
-  )
-       
-  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
-  
-  # this is different for HDP2
-  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir}")
-  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
-    cmd3 += format(" && mkdir -p {oozie_libext_dir} && cp {jdbc_driver_jar} {oozie_libext_dir}")
-    
-  # this is different for HDP2
-  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 {hadoop_jar_location} -extjs {ext_js_path} {jar_option} {jar_path}")
-  
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  Execute( [cmd1, cmd2, cmd3],
-    not_if  = no_op_test
-  )
-  Execute( cmd4,
-    user = params.oozie_user,
-    not_if  = no_op_test
-  )
-  
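A note on the format() calls above: resource_management's format() interpolates {names}
from the caller's scope (locals plus the params set via env.set_params), which is why
oozie.py can write format("{conf_dir}/oozie-env.sh") without passing conf_dir explicitly.
A minimal illustration, as a sketch:

    # Sketch: format() resolves {conf_dir} from the calling frame.
    from resource_management import *

    conf_dir = "/etc/oozie/conf"
    print format("{conf_dir}/oozie-env.sh")  # -> /etc/oozie/conf/oozie-env.sh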

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py
deleted file mode 100644
index 23fdc12..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=False)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py
deleted file mode 100644
index eca2a56..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=True)
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    oozie_service(action='start')
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    oozie_service(action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py
deleted file mode 100644
index 1d8767c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from resource_management import *
-
-def oozie_service(action = 'start'): # 'start' or 'stop'
-  import params
-
-  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
-    
-    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
-    else:
-      db_connection_check_command = None
-      
-    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
-    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
-      
-    if db_connection_check_command:
-      Execute( db_connection_check_command)
-                  
-    Execute( cmd1,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-      ignore_failures = True
-    ) 
-    
-    Execute( cmd2,
-      user = params.oozie_user,       
-      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
-    )
-    
-    Execute( start_cmd,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-    )
-  elif action == 'stop':
-    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
-    Execute( stop_cmd,
-      only_if  = no_op_test
-    )
-
-  
-  
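The not_if/only_if guards in oozie_service above are what make start and stop idempotent:
the guard's shell test runs first, and the Execute resource is skipped when a not_if test
exits 0 (or run only when an only_if test exits 0). A trimmed sketch of the pattern used
above:

    # Sketch: skip the start command when the pid file points at a live process.
    no_op_test = format(
        "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
    Execute(start_cmd,
            user=params.oozie_user,
            not_if=no_op_test)  # skipped when the daemon is already running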

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py
deleted file mode 100644
index cd3e7bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
-conf_dir = "/etc/oozie/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
-jdk_location = config['hostLevelParams']['jdk_location']
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hadoop_prefix = "/usr"
-oozie_tmp_dir = "/var/tmp/oozie"
-oozie_hdfs_user_dir = format("/user/{oozie_user}")
-oozie_pid_dir = status_params.oozie_pid_dir
-pid_file = status_params.pid_file
-hadoop_jar_location = "/usr/lib/hadoop/"
-# for HDP2 it's "/usr/share/HDP-oozie/ext-2.2.zip"
-ext_js_path = "/usr/share/HDP-oozie/ext.zip"
-oozie_libext_dir = "/usr/lib/oozie/libext"
-lzo_enabled = config['configurations']['global']['lzo_enabled']
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
-oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
-
-oracle_driver_jar_name = "ojdbc6.jar"
-java_share_dir = "/usr/share/java"
-
-java_home = config['hostLevelParams']['java_home']
-oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
-oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
-oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
-oozie_lib_dir = "/var/lib/oozie/"
-oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
-
-jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-if jdbc_driver_name == "com.mysql.jdbc.Driver":
-  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-else:
-  jdbc_driver_jar = ""
-  
-if lzo_enabled or jdbc_driver_name:
-  jar_option = "-jars"         
-else:
-  jar_option = ""
-  
-lzo_jar_suffix = "/usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar" if lzo_enabled else ""
-  
-if lzo_enabled and jdbc_driver_name:
-    jar_path = format("{lzo_jar_suffix}:{jdbc_driver_jar}")        
-else:
-    jar_path = "{lzo_jar_suffix}{jdbc_driver_jar}"
-
-#oozie-log4j.properties
-if ('oozie-log4j' in config['configurations']):
-  log4j_props = config['configurations']['oozie-log4j']
-else:
-  log4j_props = None
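
The format() calls throughout params.py interpolate variables from the
caller's scope by name. A tiny standalone illustration, assuming
resource_management is on PYTHONPATH and using an assumed value:

from resource_management import format

oozie_user = "oozie"  # assumed value, for illustration
print(format("/user/{oozie_user}"))  # -> /user/oozie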

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py
deleted file mode 100644
index 7dbfc87..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from resource_management import *
-
-class OozieServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    # on HDP2 this file is different
-    smoke_test_file_name = 'oozieSmoke.sh'
-
-    oozie_smoke_shell_file( smoke_test_file_name)
-  
-def oozie_smoke_shell_file(
-  file_name
-):
-  import params
-
-  File( format("/tmp/{file_name}"),
-    content = StaticFile(file_name),
-    mode = 0755
-  )
-  
-  if params.security_enabled:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
-  else:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
-
-  Execute( format("/tmp/{file_name}"),
-    command   = sh_cmd,
-    tries     = 3,
-    try_sleep = 5,
-    logoutput = True
-  )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServiceCheck().execute()
-  
-if __name__ == "__main__":
-  OozieServiceCheck().execute()
-  #main()
-  
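
The File(content=StaticFile(...)) pairing above copies a script bundled in the
service's package/files directory onto the host before executing it. A minimal
sketch under the same assumptions (resource_management importable, the named
file present in the package's files search path):

from resource_management import File, StaticFile
from resource_management.core.environment import Environment

with Environment() as env:
    File("/tmp/oozieSmoke.sh",
         content=StaticFile("oozieSmoke.sh"),  # resolved from package/files/
         mode=0755)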

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py
deleted file mode 100644
index c44fcf4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
-pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2
deleted file mode 100644
index 270a1a8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-# export OOZIE_HTTP_PORT=11000
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
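
oozie-env.sh.j2 above is rendered by the agent with the params module as its
template context. A hypothetical standalone rendering with jinja2, just to
show the {{var}} substitution (the values here are assumptions, not taken
from this diff):

from jinja2 import Template

snippet = Template("export JAVA_HOME={{java_home}}\n"
                   "export OOZIE_LOG={{oozie_log_dir}}\n")
print(snippet.render(java_home="/usr/jdk64/jdk1.6.0_31",
                     oozie_log_dir="/var/log/oozie"))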

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig-log4j.xml
deleted file mode 100644
index a0db719..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig-log4j.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>log4j.logger.org.apache.pig</name>
-    <value>info, A</value>
-  </property>
-  <property>
-    <name>log4j.appender.A</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.A.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.A.layout.ConversionPattern</name>
-    <value>%-4r [%t] %-5p %c %x - %m%n</value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
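
As a quick worked example of the two reducer-estimation knobs above, using
Pig's commonly documented estimate of ceil(input bytes / bytes.per.reducer),
capped at reducers.max (the input size is an assumption):

import math

input_bytes = 10 * 10**9   # assumed 10 GB of input
per_reducer = 1000000000   # pig.exec.reducers.bytes.per.reducer
cap = 999                  # pig.exec.reducers.max
reducers = min(cap, max(1, int(math.ceil(input_bytes / float(per_reducer)))))
print(reducers)  # -> 10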

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml
deleted file mode 100644
index 2b66bbf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.11.1.1.3.3.0</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>pig-log4j</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py
deleted file mode 100644
index 781e635..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-# server configurations
-config = Script.get_config()
-
-pig_conf_dir = "/etc/pig/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr"
-
-#log4j.properties
-if ('pig-log4j' in config['configurations']):
-  log4j_props = config['configurations']['pig-log4j']
-else:
-  log4j_props = None
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py
deleted file mode 100644
index 0450d5e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-
-def pig():
-  import params
-
-  Directory( params.pig_conf_dir,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  pig_TemplateConfig( ['pig-env.sh','pig.properties'])
-
-  if (params.log4j_props != None):
-    PropertiesFile('log4j.properties',
-                   dir=params.pig_conf_dir,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.hdfs_user,
-                   group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hdfs_user
-    )
-  
-  
-def pig_TemplateConfig(name):
-  import params
-  
-  if not isinstance(name, list):
-    name = [name]
-    
-  for x in name:
-    TemplateConfig( format("{pig_conf_dir}/{x}"),
-        owner = params.hdfs_user
-    )
-  
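
pig_TemplateConfig above normalizes its argument so callers may pass either a
single template name or a list of names; the same idiom standalone:

def normalize(name):
    # Accept a single name or a list of names, always return a list.
    return name if isinstance(name, list) else [name]

print(normalize("pig-env.sh"))                      # ['pig-env.sh']
print(normalize(["pig-env.sh", "pig.properties"]))  # unchanged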

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py
deleted file mode 100644
index acd0cb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from pig import pig
-
-         
-class PigClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    pig()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py
deleted file mode 100644
index 3cca087..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class PigServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    input_file = 'passwd'
-    output_file = "pigsmoke.out"
-  
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    #cleanup is run before the put to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
-    test_cmd = format("fs -test -e {output_file}")
-  
-    ExecuteHadoop( create_file_cmd,
-      tries     = 3,
-      try_sleep = 5,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-  
-    File( '/tmp/pigSmoke.sh',
-      content = StaticFile("pigSmoke.sh"),
-      mode = 0755
-    )
-  
-    Execute( "pig /tmp/pigSmoke.sh",
-      tries     = 3,
-      try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      user      = params.smokeuser,
-      logoutput = True
-    )
-  
-    ExecuteHadoop( test_cmd,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigServiceCheck().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigServiceCheck().execute()
-  
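
The create_file_cmd above chains cleanup and put with ';' on purpose: the
compound command's exit status is that of the last command, so a failed
cleanup on the first attempt does not fail the check. A two-line
demonstration:

import subprocess

print(subprocess.call("false; true", shell=True))  # -> 0, status of last command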

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2
deleted file mode 100644
index b0e17d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2
deleted file mode 100644
index 6fcb233..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Pig configuration file. All values can be overwritten by command line arguments.
-
-# log4jconf log4j configuration file
-# log4jconf=./conf/log4j.properties
-
-# a file that contains pig script
-#file=
-
-# load jarfile, colon separated
-#jar=
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-#verbose=true
-
-#exectype local|mapreduce, mapreduce is default
-#exectype=local
-
-#pig.logfile=
-
-#Do not spill temp files smaller than this size (bytes)
-#pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-#pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-#pig.exec.reducers.bytes.per.reducer=1000000000
-#pig.exec.reducers.max=999
-
-#Use this option only when your Pig job will otherwise die because of
-#using more counter than hadoop configured limit
-#pig.disable.counter=true
-hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml
deleted file mode 100644
index 426bb25..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SQOOP</name>
-      <comment>Tool for transferring bulk data between Apache Hadoop and
-        structured data stores such as relational databases
-      </comment>
-      <version>1.4.3.1.3.3.0</version>
-
-      <components>
-        <component>
-          <name>SQOOP</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/sqoop_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>sqoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py
deleted file mode 100644
index 3860581..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py
deleted file mode 100644
index 21a39d9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-security_enabled = config['configurations']['global']['security_enabled']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-sqoop_conf_dir = "/usr/lib/sqoop/conf"
-hbase_home = "/usr"
-hive_home = "/usr"
-zoo_conf_dir = "/etc/zookeeper"
-sqoop_lib = "/usr/lib/sqoop/lib"
-sqoop_user = "sqoop"
-
-keytab_path = config['configurations']['global']['keytab_path']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py
deleted file mode 100644
index b872be6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-
-from resource_management import *
-
-
-class SqoopServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
-    Execute("sqoop version",
-            user = params.smokeuser,
-            logoutput = True
-    )
-
-if __name__ == "__main__":
-  SqoopServiceCheck().execute()
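
The service check above runs kinit only on secure clusters before the smoke
command. A standalone sketch of that flow — the keytab path, principal, and
availability of sqoop on PATH are all assumptions for illustration:

import subprocess

security_enabled = False  # would come from cluster configuration
if security_enabled:
    subprocess.check_call(["/usr/bin/kinit", "-kt",
                           "/etc/security/keytabs/smokeuser.headless.keytab",
                           "ambari-qa"])
subprocess.check_call(["sqoop", "version"])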

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py
deleted file mode 100644
index 492550e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import sys
-
-def sqoop(type=None):
-  import params
-  Link(params.sqoop_lib + "/mysql-connector-java.jar",
-       to = '/usr/share/java/mysql-connector-java.jar'
-  )
-  Directory(params.sqoop_conf_dir,
-            owner = params.sqoop_user,
-            group = params.user_group
-  )
-  sqoop_TemplateConfig("sqoop-env.sh")
-  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
-          owner = params.sqoop_user,
-          group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  pass
-
-def sqoop_TemplateConfig(name, tag=None):
-  import params
-  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
-                  owner = params.sqoop_user,
-                  template_tag = tag
-  )
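
The Link resource used in sqoop() above manages a symlink, roughly like
ln -sfn. A minimal sketch under the same resource_management assumption:

from resource_management import Link
from resource_management.core.environment import Environment

with Environment() as env:
    Link("/usr/lib/sqoop/lib/mysql-connector-java.jar",
         to="/usr/share/java/mysql-connector-java.jar")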

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py
deleted file mode 100644
index bd2863c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-from resource_management import *
-
-from sqoop import sqoop
-
-
-class SqoopClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    sqoop(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2
deleted file mode 100644
index 90cbc75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
-
-#Set the path to where the zookeeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
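
The ${VAR:-default} expansions above are shell's use-default-if-unset syntax;
for comparison, the Python counterpart is os.environ.get:

import os

hadoop_home = os.environ.get("HADOOP_HOME", "/usr/lib/hadoop")
print(hadoop_home)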

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 16d8691..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Time out for templeton api</description>
-  </property>
-
-</configuration>


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 296b0c1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,518 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem an DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is the same as dfs.namenode.checkpoint.dir.
-    </description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.txns</name>
-    <value>1000000</value>
-    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
-      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
-      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for balancing purposes, in terms of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-    <description>
-      The datanode port for data transfer. This property is effective only if referenced from the dfs.datanode.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
-    <description>
-      The datanode http port. This property is effective only if referenced from the dfs.datanode.http.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and port on which the NameNode web UI
-      listens.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>The number of server threads for the namenode, increased to allow more client connections.</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The HTTPS address where the NameNode binds.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be set on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL determining who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writes to stale nodes so as to prevent hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A datanode is marked stale when no heartbeat has been received within this interval, in milliseconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/grid/0/hdfs/journal</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-  </property>
-
-  <!-- HDFS Short-Circuit Local Reads -->
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>
-      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
-      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
-  </property>
-
-</configuration>

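For reference, the stack scripts that replace this file's puppet handling read
these hdfs-site values through the resource_management config dictionary. A
minimal sketch, assuming the command JSON carries an hdfs-site section keyed
by the <name> elements above:

from resource_management import *

config = Script.get_config()
hdfs_site = config['configurations']['hdfs-site']

# dfs.datanode.data.dir may be a comma-delimited list of directories
data_dirs = hdfs_site['dfs.datanode.data.dir'].split(',')
exclude_file = hdfs_site['dfs.hosts.exclude']
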
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml
deleted file mode 100644
index 393d167..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.1.0.2.1.1</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>JOURNALNODE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/journalnode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/zkfc_slave.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-libhdfs</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ambari-log4j</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>


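Each <commandScript> above names a Python file whose Script subclass Ambari
drives through install/start/stop/status. A hypothetical outline of the
scripts/datanode.py referenced by the DATANODE component (the class name and
method bodies here are illustrative, not taken from this commit):

from resource_management import *

class DataNode(Script):
  def install(self, env):
    # pulls in the <packages> declared in the osSpecific section above
    self.install_packages(env)

  def start(self, env):
    import params
    env.set_params(params)
    # start the datanode daemon here

  def stop(self, env):
    import params
    env.set_params(params)
    # stop the datanode daemon here

  def status(self, env):
    # signal "not running" to Ambari until a real pid check is wired in
    raise ComponentIsNotRunning()

if __name__ == "__main__":
  DataNode().execute()
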
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
deleted file mode 100644
index 207331b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_monitor_service
-
-
-class GangliaMonitor(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-
-  def start(self, env):
-    ganglia_monitor_service.monitor("start")
-
-  def stop(self, env):
-    ganglia_monitor_service.monitor("stop")
-
-
-  def status(self, env):
-    import status_params
-    pid_file_name = 'gmond.pid'
-    pid_file_count = 0
-    pid_dir = status_params.pid_dir
-    # Recursively check all existing gmond pid files
-    for cur_dir, subdirs, files in os.walk(pid_dir):
-      for file_name in files:
-        if file_name == pid_file_name:
-          pid_file = os.path.join(cur_dir, file_name)
-          check_process_status(pid_file)
-          pid_file_count += 1
-    if pid_file_count == 0: # If no pid file is present
-      raise ComponentIsNotRunning()
-
-
-  def configure(self, env):
-    import params
-
-    ganglia.groups_and_users()
-
-    Directory(params.ganglia_conf_dir,
-              owner="root",
-              group=params.user_group,
-              recursive=True
-    )
-
-    ganglia.config()
-
-    if params.is_namenode_master:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_jtnode_master:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_rmnode_master:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hsnode_master:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hbase_master:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    pure_slave = not (params.is_namenode_master or
-                      params.is_jtnode_master or
-                      params.is_rmnode_master or
-                      params.is_hsnode_master or
-                      params.is_hbase_master) and params.is_slave
-    if pure_slave:
-      generate_daemon("gmond",
-                      name = "HDPSlaves",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-
-    Directory(path.join(params.ganglia_dir, "conf.d"),
-              owner="root",
-              group=params.user_group
-    )
-
-    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "gmond.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-if __name__ == "__main__":
-  GangliaMonitor().execute()

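The status() method above leans on the pid-file contract of
check_process_status(): it raises ComponentIsNotRunning when the pid file is
missing or its process is dead. Reduced to the bare idiom (the function name
is illustrative):

import os
from resource_management import *

def gmond_status(pid_dir):
  # Every gmond.pid under pid_dir must belong to a live process,
  # and at least one such file must exist.
  found = 0
  for cur_dir, subdirs, files in os.walk(pid_dir):
    for file_name in files:
      if file_name == 'gmond.pid':
        check_process_status(os.path.join(cur_dir, file_name))
        found += 1
  if found == 0:
    raise ComponentIsNotRunning()
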
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
deleted file mode 100644
index d86d894..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def monitor(action=None):# 'start' or 'stop'
-  if action == "start":
-    Execute("chkconfig gmond off",
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    )
-  Execute(
-    format(
-      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
-    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )

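Note how format() above interpolates {action} from the caller's local scope;
that is the resource_management idiom for building shell commands. A minimal
sketch of the same pattern:

from resource_management import *

def monitor(action=None):  # 'start' or 'stop'
  # {action} resolves against this function's locals via format()
  Execute(format("service hdp-gmond {action} >> /tmp/gmond.log 2>&1"),
          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
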
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
deleted file mode 100644
index 7fa747a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_server_service
-
-
-class GangliaServer(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/gmetad.pid")
-    # Check the single gmetad pid file
-    check_process_status(pid_file)
-
-  def configure(self, env):
-    import params
-
-    ganglia.groups_and_users()
-    ganglia.config()
-
-    if params.has_namenodes:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_jobtracker:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_masters:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_resourcemanager:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-    if params.has_historyserver:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_slaves:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-    generate_daemon("gmetad",
-                    name = "gmetad",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-    change_permission()
-    server_files()
-    File(path.join(params.ganglia_dir, "gmetad.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-def change_permission():
-  import params
-
-  Directory('/var/lib/ganglia/dwoo',
-            mode=0777,
-            owner=params.gmetad_user,
-            recursive=True
-  )
-
-
-def server_files():
-  import params
-
-  rrd_py_path = params.rrd_py_path
-  Directory(rrd_py_path,
-            recursive=True
-  )
-  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
-  File(rrd_py_file_path,
-       content=StaticFile("rrd.py"),
-       mode=0755
-  )
-  rrd_file_owner = params.gmetad_user
-  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
-    Directory(params.rrdcached_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              mode=0755,
-              recursive=True
-    )
-    Directory(params.rrdcached_default_base_dir,
-              action = "delete"
-    )
-    Link(params.rrdcached_default_base_dir,
-         to=params.rrdcached_base_dir
-    )
-  elif rrd_file_owner != 'nobody':
-    Directory(params.rrdcached_default_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              recursive=True
-    )
-
-
-if __name__ == "__main__":
-  GangliaServer().execute()

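server_files() above relocates the rrdcached base directory with a
create/delete/link sequence, so tools that hard-code the default
/var/lib/ganglia/rrds path keep working after the move. The essential steps,
as a sketch (the function name is illustrative):

from resource_management import *

def relocate_rrd_dir(default_dir, new_dir, owner):
  # create the new location, drop the default one, and leave a
  # symlink behind so the default path transparently redirects
  Directory(new_dir, owner=owner, group=owner, mode=0755, recursive=True)
  Directory(default_dir, action="delete")
  Link(default_dir, to=new_dir)
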
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
deleted file mode 100644
index b93e3f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def server(action=None):# 'start' or 'stop'
-  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  Execute(format(command),
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
deleted file mode 100644
index 97ef6bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-
-config = Script.get_config()
-
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = config['configurations']['global']["ganglia_conf_dir"]
-ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
-ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
-
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-webserver_group = "apache"
-rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-
-ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
-
-hostname = config["hostname"]
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-rm_host = default("/clusterHostInfo/rm_host", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-# datanodes are marked as slave_hosts
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
-hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
-flume_hosts = default("/clusterHostInfo/flume_hosts", [])
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-is_tasktracker = hostname in tt_hosts
-is_hbase_rs = hostname in hbase_rs_hosts
-is_flume = hostname in flume_hosts
-
-has_namenodes = len(namenode_host) > 0
-has_jobtracker = len(jtnode_host) > 0
-has_resourcemanager = len(rm_host) > 0
-has_historyserver = len(hs_host) > 0
-has_hbase_masters = len(hbase_master_hosts) > 0
-has_slaves = len(slave_hosts) > 0
-has_tasktracker = len(tt_hosts) > 0
-has_hbase_rs = len(hbase_rs_hosts) > 0
-has_flume = len(flume_hosts) > 0
-
-if System.get_instance().os_family == "suse":
-  rrd_py_path = '/srv/www/cgi-bin'
-else:
-  rrd_py_path = '/var/www/cgi-bin'

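The default() helper above is what keeps this params module import-safe on
clusters that lack a given service: it returns the fallback instead of
raising when the clusterHostInfo key is absent. A minimal sketch of the
derived flags it feeds:

from resource_management import *

config = Script.get_config()
hostname = config["hostname"]

# empty list when no Flume hosts are registered, so both flags
# degrade safely to False instead of raising a KeyError
flume_hosts = default("/clusterHostInfo/flume_hosts", [])
is_flume = hostname in flume_hosts
has_flume = len(flume_hosts) > 0
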
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
deleted file mode 100644
index 3ccad2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
deleted file mode 100644
index 23588a5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-    HDPJournalNode      {{ganglia_server_host}}   8654
-    HDPFlumeServer      {{ganglia_server_host}}   8655
-    HDPHBaseRegionServer       	{{ganglia_server_host}}   8656
-    HDPNodeManager     	{{ganglia_server_host}}   8657
-    HDPTaskTracker     	{{ganglia_server_host}}   8658
-    HDPDataNode       	{{ganglia_server_host}}   8659
-    HDPSlaves       	{{ganglia_server_host}}   8660
-    HDPNameNode         {{ganglia_server_host}}   8661
-    HDPJobTracker     	{{ganglia_server_host}}  8662
-    HDPHBaseMaster      {{ganglia_server_host}}   8663
-    HDPResourceManager  {{ganglia_server_host}}   8664
-    HDPHistoryServer    {{ganglia_server_host}}   8666

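Templates like the one above are rendered with the params module in scope,
which is where {{ganglia_server_host}} gets its value. A sketch of the usual
wiring, assuming resource_management's Template source and an illustrative
target path:

from resource_management import *

def write_clusters_conf():
  import params
  # Template() renders the .j2, resolving {{ganglia_server_host}}
  # and friends from the params module
  File("/usr/libexec/hdp/ganglia/gangliaClusters.conf",
       content=Template("gangliaClusters.conf.j2"),
       owner="root",
       group=params.user_group)
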
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
deleted file mode 100644
index 1ead550..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER={{gmetad_user}};
-GMOND_USER={{gmond_user}};
-WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
deleted file mode 100644
index 4b5bdd1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR={{ganglia_conf_dir}};
-GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
-RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/global.xml
deleted file mode 100644
index 453184b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>PID directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.25</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>10737418240</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-    <property>
-    <name>regionserver_handlers</name>
-    <value>30</value>
-    <description>HBase RegionServer Handler</description>
-  </property>
-    <property>
-    <name>hregion_majorcompaction</name>
-    <value>86400000</value>
-    <description>HBase Major Compaction.</description>
-  </property>
-    <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-    <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>Base Client Scanner Caching</description>
-  </property>
-    <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>60000</value>
-    <description>ZooKeeper Session Timeout</description>
-  </property>
-    <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-   <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read</description>
-  </property>
-   <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support</description>
-  </property>
-   <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>7</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Region Server memstore.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.35</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>

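The global config type above is where the HBase scripts pick up their users
and directories. A minimal sketch, assuming the same key names as in this
file:

from resource_management import *

config = Script.get_config()
global_conf = config['configurations']['global']

hbase_user = global_conf['hbase_user']        # 'hbase' above
hbase_pid_dir = global_conf['hbase_pid_dir']  # '/var/run/hbase' above
pid_file = format("{hbase_pid_dir}/hbase-{hbase_user}-regionserver.pid")
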
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-log4j.xml
deleted file mode 100644
index 2258d73..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-log4j.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hbase.root.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hbase.security.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hbase.log.dir</name>
-    <value>.</value>
-  </property>
-  <property>
-    <name>hbase.log.file</name>
-    <value>hbase.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hbase.root.logger}</value>
-  </property>
-  <property>
-    <name>log4j.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hbase.log.dir}/${hbase.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>hbase.log.maxfilesize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>hbase.log.maxbackupindex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.File</name>
-    <value>${hbase.log.dir}/${hbase.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxFileSize</name>
-    <value>${hbase.log.maxfilesize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxBackupIndex</name>
-    <value>${hbase.log.maxbackupindex}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>hbase.security.log.file</name>
-    <value>SecurityAuth.audit</value>
-  </property>
-  <property>
-    <name>hbase.security.log.maxfilesize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>hbase.security.log.maxbackupindex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.File</name>
-    <value>${hbase.log.dir}/${hbase.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxFileSize</name>
-    <value>${hbase.security.log.maxfilesize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxBackupIndex</name>
-    <value>${hbase.security.log.maxbackupindex}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.category.SecurityLogger</name>
-    <value>${hbase.security.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.SecurityLogger</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.NullAppender</name>
-    <value>org.apache.log4j.varia.NullAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.zookeeper</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hbase</name>
-    <value>DEBUG</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher</name>
-    <value>INFO</value>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementations (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index bd4d61f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,370 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, or else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if the memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstores during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-    Memstore will be flushed to disk if the size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-    Maximum HStoreFile size. If any one of a column family's HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 10G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory,
-    and some calls of next may take longer when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout, i.e. hbase.regionserver.lease.period.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>60000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since KeyValues cannot be split, this helps prevent a region
-    from becoming unsplittable because a single entry is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. The default of 0.25 means allocate 25%.
-        Set to 0 to disable, but this is not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-    <description>Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
-      (no authentication) and 'kerberos'.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand, per table, via its HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS config, set here so the HDFS client will enable append support.
-    You must ensure that this config is also true server-side when running HBase
-    (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check.</description>
-  </property>
-  
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file paths are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-
-</configuration>
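
For a sense of scale, here is how the two memstore limits above interact; the
8 GiB heap below is an assumption for illustration, not a value from this file:

    heap_bytes = 8 * 1024 ** 3   # assumed region server heap (8 GiB)
    upper = 0.4 * heap_bytes     # upperLimit: updates block at ~3.2 GiB of memstore
    lower = 0.38 * heap_bytes    # lowerLimit: forced flushing continues to ~3.04 GiB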

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml
deleted file mode 100644
index 7a7c3d6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,124 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <comment>Non-relational distributed database</comment>
-      <version>0.94.6.1.3.3.0</version>
-      <components>
-        <component>
-          <name>HBASE_MASTER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HBASE/HBASE_MASTER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_regionserver.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hbase</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-        <config-type>hbase-log4j</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 39fe6e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file
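
A hedged usage sketch for the verifier above: it scans the 'ambarismoketest'
table and exits non-zero unless the expected value and a '1 row(s)' count both
appear. The conf dir and row value here are illustrative assumptions:

    import subprocess
    # argv: conf_dir, then the data value the smoke test wrote
    rc = subprocess.call(['sh', 'hbaseSmokeVerify.sh',
                          '/etc/hbase/conf', 'id_1391198752'])
    print('smoke verify %s' % ('passed' if rc == 0 else 'failed'))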

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
deleted file mode 100644
index e6e7fb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import math
-import datetime
-
-from resource_management.core.shell import checked_call
-
-def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
-  """
-  @param heapsize_str: str (e.g '1000m')
-  @param xmn_percent: float (e.g 0.2)
-  @param xmn_max: integer (e.g 512)
-  """
-  heapsize = int(re.search('\d+',heapsize_str).group(0))
-  heapsize_unit = re.search('\D+',heapsize_str).group(0)
-  xmn_val = int(math.floor(heapsize*xmn_percent))
-  xmn_val -= xmn_val % 8
-  
-  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
-  return str(result_xmn_val) + heapsize_unit
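
A worked example of calc_xmn_from_xms, with the function above in scope and
illustrative inputs: the Xmn candidate is floored to a multiple of 8, capped
at xmn_max, and keeps the unit suffix of the heap size string:

    print(calc_xmn_from_xms('1024m', 0.2, 512))  # floor(204.8)=204 -> 200 -> '200m'
    print(calc_xmn_from_xms('4096m', 0.2, 512))  # 819 -> 816 -> capped at '512m'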

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
deleted file mode 100644
index fddd1b7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-def hbase(type=None # 'master' or 'regionserver' or 'client'
-              ):
-  import params
-  
-  Directory( params.conf_dir,
-      owner = params.hbase_user,
-      group = params.user_group,
-      recursive = True
-  )
-  
-  XmlConfig( "hbase-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hbase-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  XmlConfig( "hdfs-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hdfs-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-  
-  if 'hbase-policy' in params.config['configurations']:
-    XmlConfig( "hbase-policy.xml",
-      configurations = params.config['configurations']['hbase-policy'],
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  # Manually overriding ownership of file installed by hadoop package
-  else: 
-    File( format("{conf_dir}/hbase-policy.xml"),
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  
-  hbase_TemplateConfig( 'hbase-env.sh')     
-       
-  hbase_TemplateConfig( params.metric_prop_file_name,
-    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
-  )
-
-  hbase_TemplateConfig( 'regionservers')
-
-  if params.security_enabled:
-    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
-  
-  if type != "client":
-    Directory( params.pid_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-  
-    Directory ( [params.tmp_dir, params.log_dir],
-      owner = params.hbase_user,
-      recursive = True
-    )
-
-  if (params.log4j_props != None):
-    PropertiesFile('log4j.properties',
-                   dir=params.conf_dir,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.hbase_user,
-                   group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.conf_dir}/log4j.properties"))):
-    File(format("{params.conf_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hbase_user
-    )
-
-def hbase_TemplateConfig(name, 
-                         tag=None
-                         ):
-  import params
-
-  TemplateConfig( format("{conf_dir}/{name}"),
-      owner = params.hbase_user,
-      template_tag = tag
-  )
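
Taken together, one hbase(type=...) call lays down everything a role needs. A
sketch of the master case, assuming the Ambari agent context that normally
supplies the params module:

    hbase(type='master')
    # renders: hbase-site.xml, hdfs-site.xml, hbase-policy.xml (or chowns it),
    #          hbase-env.sh, hadoop-metrics.properties (GANGLIA-MASTER tag),
    #          regionservers, hbase_master_jaas.conf when security_enabled,
    #          plus the pid/tmp/log directories, since type != 'client'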

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
deleted file mode 100644
index 0f2a1bc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-
-         
-class HbaseClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    
-    hbase(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/'
-  stdoutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
-  
-  HbaseClient().execute()
-  
-if __name__ == "__main__":
-  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_decommission.py
deleted file mode 100644
index dba89fa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_decommission.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hbase_decommission(env):
-  import params
-
-  env.set_params(params)
-
-  if params.hbase_drain_only == True:
-    print "TBD: Remove host from draining"
-    pass
-
-  else:
-
-    kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};") if params.security_enabled else ""
-
-    hosts = params.hbase_excluded_hosts.split(",")
-    for host in hosts:
-      if host:
-        print "TBD: Add host to draining"
-        regionmover_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {conf_dir} org.jruby.Main {region_mover} unload {host}")
-
-        Execute(regionmover_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-      pass
-    pass
-  pass
-
-
-pass
\ No newline at end of file
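
With the paths this stack defines in params.py (later in this patch), the
unload command built above expands roughly as below; 'host1' is an assumed
hostname and kinit_cmd is empty when security is off:

    regionmover_cmd = (
        '{hbase_cmd} --config {conf_dir} org.jruby.Main'
        ' {region_mover} unload {host}').format(
            hbase_cmd='/usr/lib/hbase/bin/hbase',
            conf_dir='/etc/hbase/conf',
            region_mover='/usr/lib/hbase/bin/region_mover.rb',
            host='host1')
    # with security enabled, params prepend
    # '<kinit_path_local> -kt <hbase_user_keytab> <hbase_user>;'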

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
deleted file mode 100644
index 9c78e5c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-from hbase_decommission import hbase_decommission
-
-         
-class HbaseMaster(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='master')
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'master',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'master',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
-    check_process_status(pid_file)
-
-  def decommission(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_decommission(env)
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
-  stroutputf = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
-  
-  HbaseMaster().execute()
-  
-if __name__ == "__main__":
-  HbaseMaster().execute()
-  #main()
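
The commented-out main() above doubles as documentation of the calling
convention. A hedged sketch of driving the script the same way by hand; the
file paths are the ones main() hardcodes, while the 'start' command is an
assumption (main() itself defaults to "install"):

    import sys
    # argv layout: script name, command, command JSON, package base dir, out file
    sys.argv = ['', 'start',
                '/var/lib/ambari-agent/data/command-3.json',
                '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package',
                '/1.txt']
    # HbaseMaster().execute() would then dispatch to the start() method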

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
deleted file mode 100644
index 2d91e75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-         
-class HbaseRegionServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='regionserver')
-      
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'regionserver',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'regionserver',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
-    check_process_status(pid_file)
-    
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseRegionServer().execute()
-  
-if __name__ == "__main__":
-  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
deleted file mode 100644
index 7a1248b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
-    import params
-  
-    role = name
-    cmd = format("{daemon_script} --config {conf_dir}")
-    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
-    
-    daemon_cmd = None
-    no_op_test = None
-    
-    if action == 'start':
-      daemon_cmd = format("{cmd} start {role}")
-      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    elif action == 'stop':
-      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
-
-    if daemon_cmd is not None:
-      Execute ( daemon_cmd,
-        not_if = no_op_test,
-        user = params.hbase_user
-      )
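
A sketch of the strings this helper builds for a region server, using the
daemon_script and conf_dir this stack defines and an assumed pid_dir of
/var/run/hbase:

    role = 'regionserver'
    cmd = '%s --config %s' % ('/usr/lib/hbase/bin/hbase-daemon.sh',
                              '/etc/hbase/conf')
    daemon_cmd = '%s start %s' % (cmd, role)
    pid_file = '/var/run/hbase/hbase-hbase-%s.pid' % role
    # the start is skipped when this guard succeeds, i.e. a live process
    # already owns the pid file
    no_op_test = ('ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1'
                  % (pid_file, pid_file))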

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
deleted file mode 100644
index 1d57fc9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from functions import calc_xmn_from_xms
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-region_drainer = "/usr/lib/hbase/bin/region_drainer.rb"
-hbase_cmd = "/usr/lib/hbase/bin/hbase"
-hbase_excluded_hosts = default("/commandParams/excluded_hosts", "")
-hbase_drain_only = default("/commandParams/mark_draining_only", "")
-
-hbase_user = config['configurations']['global']['hbase_user']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-user_group = config['configurations']['global']['user_group']
-
-# this is "hadoop-metrics2-hbase.properties" for 2.x stacks
-metric_prop_file_name = "hadoop-metrics.properties" 
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
-
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
-regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
-
-pid_dir = status_params.pid_dir
-tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) # if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
-
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
-service_check_data = get_unique_id_and_date()
-
-if security_enabled:
-  
-  _use_hostname_in_principal = default('instance_name', True)
-  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
-  _hostname = config['hostname']
-  _kerberos_domain = config['configurations']['global']['kerberos_domain']
-  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
-  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
-  
-  if _use_hostname_in_principal:
-    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
-  else:
-    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
-    
-master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
-regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#log4j.properties
-if ('hbase-log4j' in config['configurations']):
-  log4j_props = config['configurations']['hbase-log4j']
-else:
-  log4j_props = None
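
A sketch of the principal strings the security block above produces when
_use_hostname_in_principal is true; every value below is an illustrative
assumption:

    _master_primary_name = 'hbase'
    _hostname = 'c6401.ambari.apache.org'
    _kerberos_domain = 'EXAMPLE.COM'
    master_jaas_princ = '%s/%s@%s' % (_master_primary_name, _hostname,
                                      _kerberos_domain)
    print(master_jaas_princ)  # hbase/c6401.ambari.apache.org@EXAMPLE.COM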


[21/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index 2f2ca8b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,322 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_java():
-  """
-  Installs the JDK using specific params that come from ambari-server
-  """
-  import params
-
-  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
-  java_dir = os.path.dirname(params.java_home)
-  java_exec = format("{java_home}/bin/java")
-  
-  if not params.jdk_name:
-    return
-  
-  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}"))
-
-  if params.jdk_name.endswith(".bin"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
-  elif params.jdk_name.endswith(".gz"):
-    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
-  
-  Execute(install_cmd,
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}")
-  )
-  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
-  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
-  Execute( download_jce,
-        path = ["/bin","/usr/bin/"],
-        not_if =format("test -e {jce_curl_target}"),
-        ignore_failures = True
-  )
-  
-  if params.security_enabled:
-    security_dir = format("{java_home}/jre/lib/security")
-    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
-    Execute(extract_cmd,
-          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
-          cwd  = security_dir,
-          path = ['/bin/','/usr/bin']
-    )
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
-       content=Template("snmpd.conf.j2"))
-  Service("snmpd",
-          action = "restart")
-
-  Execute("/bin/echo 0 > /selinux/enforce",
-          only_if="test -f /selinux/enforce"
-  )
-
-  install_snappy()
-
-  #directories
-  Directory(params.hadoop_conf_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hdfs_log_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hadoop_pid_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  # this isn't needed with stack 1
-  Directory(os.path.dirname(params.hadoop_tmp_dir),
-            recursive=True,
-            owner=params.hdfs_user,
-            )
-  #files
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if tc_mode:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
-    File(os.path.join(params.hadoop_conf_dir, file),
-         owner=tc_owner,
-         content=Template(file + ".j2")
-    )
-
-  health_check_template = "health_check-v2" #for stack 1 use 'health_check'
-  File(os.path.join(params.hadoop_conf_dir, "health_check"),
-       owner=tc_owner,
-       content=Template(health_check_template + ".j2")
-  )
-
-  log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-  if (params.log4j_props != None):
-    PropertiesFile(log4j_filename,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.hdfs_user,
-                   group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-    File(log4j_filename,
-         mode=0644,
-         group=params.user_group,
-         owner=params.hdfs_user,
-    )
-
-  update_log4j_props(log4j_filename)
-
-  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-       owner=params.hdfs_user,
-       content=Template("hadoop-metrics2.properties.j2")
-  )
-
-  db_driver_dload_cmd = ""
-  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-
-  if db_driver_dload_cmd:
-    Execute(db_driver_dload_cmd,
-            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
-    )
-
-
-def setup_configs():
-  """
-  Creates configs for the HDFS and MapReduce services
-  """
-  import params
-
-  if "mapred-queue-acls" in params.config['configurations']:
-    XmlConfig("mapred-queue-acls.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'mapred-queue-acls'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-  elif os.path.exists(
-      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
-    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-
-  File(params.task_log4j_properties_location,
-       content=StaticFile("task-log4j.properties"),
-       mode=0755
-  )
-
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  # if params.stack_version[0] == "1":
-  #   Link('/usr/lib/hadoop/hadoop-tools.jar',
-  #         to = '/usr/lib/hadoop/lib/hadoop-tools.jar',
-  #         mode = 0755
-  #   )
-
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
-    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-    File(os.path.join(params.hadoop_conf_dir, 'masters'),
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  generate_include_file()
-
-def update_log4j_props(file):
-  import params
-
-  for key in params.rca_property_map:
-    value = params.rca_property_map[key]
-    Execute(format(
-      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}")
-    )
-
-
-def generate_include_file():
-  import params
-
-  if params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-
-def install_snappy():
-  import params
-
-  snappy_so = "libsnappy.so"
-  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
-  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
-  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
-  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
-  so_src_dir_x86 = format("{hadoop_home}/lib")
-  so_src_dir_x64 = format("{hadoop_home}/lib64")
-  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
-  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-  Execute(
-    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-  Execute(
-    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 77e458f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index bb5795b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
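
The include_hosts_list.j2 and slaves.j2 templates later in this commit have exactly the same three-line shape. Outside Ambari, the rendering can be illustrated with plain Jinja2 (a standalone sketch, not commit code):

from jinja2 import Template

tmpl = Template("{% for host in hdfs_exclude_file %}\n"
                "{{host}}\n"
                "{% endfor %}")
print(tmpl.render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]))
# Each decommissioned host lands on its own line; the block tags add
# blank lines around them, which the HDFS hosts-file reader ignores.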

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2
deleted file mode 100644
index 1f596a3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2
+++ /dev/null
@@ -1,125 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER={{hdfs_user}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS={{mapreduce_libs_path}}
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-if [ -d "/usr/lib/tez" ]; then
-  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*
-fi
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
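
The JDBC section of the template above simply appends whatever MySQL/Oracle connector jars exist under /usr/share/java to the Hadoop classpath. A rough Python equivalent of that shell loop, purely for clarity:

import glob

jars = glob.glob("/usr/share/java/*mysql*") + glob.glob("/usr/share/java/*ojdbc*")
java_jdbc_libs = "".join(":" + jar for jar in jars)
# e.g. ":/usr/share/java/mysql-connector-java.jar" when the connector is installed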

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index a6a66ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
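
When has_ganglia_server is true, each Hadoop component is pointed at a per-component gmond port on the Ganglia server. Assuming ganglia_server_host renders to ganglia.example.com (an illustrative host), the first two sink lines become:

namenode.sink.ganglia.servers=ganglia.example.com:8661
datanode.sink.ganglia.servers=ganglia.example.com:8659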

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2
deleted file mode 100644
index ca7baa2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile 32768
-{{hdfs_user}}   - nproc  65536
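
With an illustrative hdfs_user of hdfs, this renders as a limits.conf fragment raising the open-file and process caps for that account (the "-" sets both the soft and the hard limit):

hdfs   - nofile 32768
hdfs   - nproc  65536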

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2
deleted file mode 100644
index cb7b12b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index b84b336..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2
deleted file mode 100644
index 3530444..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
-group   notConfigGroup v1           notConfigUser
-group   notConfigGroup v2c           notConfigUser
-view    systemview    included   .1
-access  notConfigGroup ""      any       noauth    exact  systemview none none
-
-syslocation Hadoop 
-syscontact HadoopMaster 
-dontLogTCPWrappersConnects yes
-
-###############################################################################
-# disk checks
-
-disk / 10000
-
-
-###############################################################################
-# load average checks
-#
-
-# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
-#
-# 1MAX:   If the 1 minute load average is above this limit at query
-#         time, the errorFlag will be set.
-# 5MAX:   Similar, but for 5 min average.
-# 15MAX:  Similar, but for 15 min average.
-
-# Check for loads:
-#load 12 14 14
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2
deleted file mode 100644
index d01d37e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml
index ca45822..3c7f87d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml
@@ -19,4 +19,5 @@
     <versions>
 	  <active>true</active>
     </versions>
+    <extends>2.0.6</extends>
 </metainfo>
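
This one-line addition underpins most of the deletions in this commit: via the extends tag, the 2.1.1 stack inherits service definitions, hooks and templates from 2.0.6 instead of carrying its own copies, which is why files such as the GANGLIA definitions below can be removed outright. The end of the file now reads:

    <versions>
	  <active>true</active>
    </versions>
    <extends>2.0.6</extends>
</metainfo>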

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml
deleted file mode 100644
index d69441b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,108 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>GANGLIA</name>
-      <comment>Ganglia Metrics Collection system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>GANGLIA_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/ganglia_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>GANGLIA_MONITOR</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/ganglia_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>libganglia-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmetad-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-web-3.5.7-99.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>python-rrdtool.x86_64</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-modules-python-3.5.0-99</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <package>
-            <type>rpm</type>
-            <name>apache2</name>
-          </package>
-          <package>
-            <type>rpm</type>
-            <name>apache2-mod_php5</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <package>
-            <type>rpm</type>
-            <name>httpd</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <package>
-            <type>rpm</type>
-            <name>httpd</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh
deleted file mode 100644
index e60eb31..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh
deleted file mode 100644
index 0cec8dc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh
deleted file mode 100644
index d94db5d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init
deleted file mode 100644
index 20b388e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh
deleted file mode 100644
index e28610e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep the daemon in the foreground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port addreses2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is assumed.
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-# rrd_rootdir "/some/other/place"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a case
-# sensitive manner
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}
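
getGmetadLoggedPid and getGmetadRunningPid above distinguish the pid recorded on disk from a process that is actually alive; the ps round-trip filters out stale pid files. The same check in Python, as a standalone sketch (not commit code):

import os

def pid_is_running(pid_file):
    # True only if pid_file exists and the recorded process is alive.
    try:
        pid = int(open(pid_file).read().strip())
    except (IOError, ValueError):
        return False      # missing or malformed pid file
    try:
        os.kill(pid, 0)   # signal 0 probes existence without sending anything
    except OSError:
        return False      # stale pid file: the process has exited
    return True           # (treats EPERM as not-running; fine for same-user daemons)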

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init
deleted file mode 100644
index afb7026..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

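The init script above only dispatches to the start/stop/check helper scripts
and maintains the /var/lock/subsys marker. A hedged Python equivalent of the
same dispatch, in line with this commit's direction; the helper paths are
copied from the script, the function itself is illustrative and omits the
lock-file handling:

import os
import subprocess
import sys

COMPONENTS_DIR = '/usr/libexec/hdp/ganglia'
SCRIPTS = {
    'start': os.path.join(COMPONENTS_DIR, 'startGmond.sh'),
    'stop': os.path.join(COMPONENTS_DIR, 'stopGmond.sh'),
    'status': os.path.join(COMPONENTS_DIR, 'checkGmond.sh'),
}

def dispatch(action):
    # restart/reload is stop followed by start; the start exit code wins,
    # exactly as in the shell script.
    if action in ('restart', 'reload'):
        dispatch('stop')
        return dispatch('start')
    script = SCRIPTS.get(action)
    if script is None:
        sys.stderr.write("Usage: %s {start|stop|restart|status}\n" % sys.argv[0])
        return 1
    if not os.path.isfile(script):
        return 1  # the init script also exits 1 when a helper is missing
    return subprocess.call(script)

if __name__ == '__main__':
    sys.exit(dispatch(sys.argv[1] if len(sys.argv) > 1 else ''))
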
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh
deleted file mode 100644
index 87da4dd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh
+++ /dev/null
@@ -1,545 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = localhost
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send this host's total memory every
-   180 secs.
-   This information doesn't change between reboots and is only collected
-   once. It is needed for the heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In honesty, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-/* The gmond cluster master must additionally provide an XML 
- * description of the cluster to the gmetad that will query it.
- */
-tcp_accept_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}

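A pattern worth noting in gmondLib.sh: every per-cluster artifact (core conf,
master/slave overlays, pid file) lives under an extra ${clusterName}
directory level, with the overlays in conf.d. A small illustrative Python
helper capturing that convention; GANGLIA_CONF_DIR really comes from
gangliaEnv.sh, so the value below is an assumption:

import os

GANGLIA_CONF_DIR = '/etc/ganglia/hdp'  # assumption; set by gangliaEnv.sh

def gmond_conf_path(filename, cluster_name=None, subdir=None):
    # Cluster-scoped files live one directory deeper; overlays go in conf.d.
    parts = [GANGLIA_CONF_DIR]
    if cluster_name:
        parts.append(cluster_name)
    if subdir:
        parts.append(subdir)
    parts.append(filename)
    return os.path.join(*parts)

# gmond_conf_path('gmond.core.conf', 'HDPSlaves')
#   -> /etc/ganglia/hdp/HDPSlaves/gmond.core.conf
# gmond_conf_path('gmond.master.conf', 'HDPSlaves', 'conf.d')
#   -> /etc/ganglia/hdp/HDPSlaves/conf.d/gmond.master.conf
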
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py
deleted file mode 100644
index 3fe6901..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-import os
-import rrdtool
-import sys
-import time
-import re
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end,
-                resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
-    value = None
-    idx = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx -= 1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-
-def stripList(l):
-  return ([x.strip() for x in l])
-
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-
-def _walk(*args, **kwargs):
-  for root, dirs, files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root, dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    # Process only paths that contain files. If no host parameter was passed,
-    # process all host folders plus the summary info; otherwise process only
-    # the requested host's folder.
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          # Exact metric name
-          printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                      os.path.join(path, file), cf, start, end, resolution,
-                      pointInTime)
-        else:
-          # Metric name given as a regex
-          metricRegex = metric + '\.rrd$'
-          p = re.compile(metricRegex)
-          matchedFiles = filter(p.match, files)
-          for matchedFile in matchedFiles:
-            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
-                        os.path.join(path, matchedFile), cf, start, end,
-                        resolution, pointInTime)
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()

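rrd.py streams each metric back as plain text with a tiny run-length
encoding: a value is printed once and, when the run ends, a "[~r]N" line
reports that it occurred N times in total; "[~n]" marks a missing sample and
"[~EOM]" terminates the metric. A sketch of a matching decoder for the value
lines of one metric (illustrative only, not part of this commit):

def decode_values(lines):
    # 'lines' are the value lines of a single metric, i.e. everything
    # between the step line and the trailing "[~EOM]". Assumes the stream
    # starts with a value line, as the emitter guarantees.
    values = []
    for line in lines:
        if line.startswith('[~r]'):
            # The previous sample occurred int(...) times in total.
            values.extend([values[-1]] * (int(line[4:]) - 1))
        elif line == '[~n]':
            values.append(None)
        else:
            values.append(float(line))
    return values
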
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh
deleted file mode 100644
index 8b7c257..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}

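The logged-pid / running-pid pair above (the same idiom appears in the gmond
and gmetad libraries) reads the pid file and then asks ps whether that pid is
still alive. The same check in Python, as a hedged sketch rather than
anything this commit ships:

import os

def logged_pid(pid_file):
    # Return the pid recorded in the file, or None if the file is absent.
    if os.path.exists(pid_file):
        with open(pid_file) as f:
            return int(f.read().strip())
    return None

def running_pid(pid_file):
    pid = logged_pid(pid_file)
    if pid is None:
        return None
    try:
        os.kill(pid, 0)  # signal 0 probes existence without signalling
        return pid
    except OSError:
        # No such process (or not ours to signal); treat as not running.
        return None
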

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json
deleted file mode 100644
index f33a0c0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json
+++ /dev/null
@@ -1,7800 +0,0 @@
-{
-  "NAMENODE": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Free": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Total": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryMax": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingDeletionBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.FilesTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ExcessBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryMax": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ScheduledReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemainingGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Threads": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NameDirStatuses": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalBlocks": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityNonDFSUsed",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM":{
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/runtime/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.SyncsAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.BlockReportAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryMax": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-          

<TRUNCATED>

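The metrics.json payloads in this patch series all follow the same schema: a top-level component name (for example NODEMANAGER in the YARN file below) maps to "Component" and/or "HostComponent" lists; each list entry declares a source "type" ("ganglia" or "jmx"); and each metric definition maps an Ambari metric path to the backing source metric name, with "pointInTime" flagging whether the value can be read as a current snapshot and "temporal" flagging whether it is available as a time series. A minimal reader sketch in Python, assuming a local copy of one of these files (the filename and the looked-up key are illustrative only, not part of this commit):

    import json

    # Assumed local copy of one of the deleted metrics.json files.
    with open("metrics.json") as f:
        defs = json.load(f)

    # Walk component -> level ("Component"/"HostComponent") -> metric sources.
    for component, levels in defs.items():
        for source in levels.get("HostComponent", []):
            if source["type"] != "jmx":
                continue
            entry = source["metrics"].get("metrics/jvm/memNonHeapUsedM")
            if entry:
                print(component,
                      entry["metric"],       # e.g. "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM"
                      entry["pointInTime"],  # True: readable as a snapshot
                      entry["temporal"])     # False: JMX provides no history

Note that the same Ambari path can appear under both a "ganglia" and a "jmx" source with opposite pointInTime/temporal flags, as the NameNode HostComponent definitions above show.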
[09/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json
deleted file mode 100644
index 68efe9f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json
+++ /dev/null
@@ -1,2534 +0,0 @@
-{
-  "NODEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsFailed": {
-            "metric": "mapred.ShuffleOutputsFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.metrics.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersCompleted": {
-            "metric": "yarn.ContainersCompleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersKilled": {
-            "metric": "yarn.ContainersKilled",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedGB": {
-            "metric": "yarn.AllocatedGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsOK": {
-            "metric": "mapred.ShuffleOutputsOK",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersFailed": {
-            "metric": "yarn.ContainersFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedContainers": {
-            "metric": "yarn.AllocatedContainers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersRunning": {
-            "metric": "yarn.ContainersRunning",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersLaunched": {
-            "metric": "yarn.ContainersLaunched",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AvailableGB": {
-            "metric": "yarn.AvailableGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleConnections": {
-            "metric": "mapred.ShuffleConnections",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersIniting": {
-            "metric": "yarn.ContainersIniting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputBytes": {
-            "metric": "mapred.ShuffleOutputBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.metrics.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsFailed": {
-            "metric": "mapred.ShuffleOutputsFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.metrics.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersCompleted": {
-            "metric": "yarn.ContainersCompleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersKilled": {
-            "metric": "yarn.ContainersKilled",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedGB": {
-            "metric": "yarn.AllocatedGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsOK": {
-            "metric": "mapred.ShuffleOutputsOK",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersFailed": {
-            "metric": "yarn.ContainersFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedContainers": {
-            "metric": "yarn.AllocatedContainers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersRunning": {
-            "metric": "yarn.ContainersRunning",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersLaunched": {
-            "metric": "yarn.ContainersLaunched",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AvailableGB": {
-            "metric": "yarn.AvailableGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleConnections": {
-            "metric": "mapred.ShuffleConnections",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersIniting": {
-            "metric": "yarn.ContainersIniting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputBytes": {
-            "metric": "mapred.ShuffleOutputBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.metrics.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ]
-  },
-  "RESOURCEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "yarn.ClusterMetrics.NumLostNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "yarn.ClusterMetrics.NumActiveNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisCopy": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memMaxM": {
-            "metric": "jvm.JvmMetrics.MemMaxM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountCopy": {
-            "metric": "jvm.JvmMetrics.GcCountCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-            "pointInTime": false,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryMax":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/HeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryMax":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/runtime/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemMaxM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/unhealthyNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedVCores",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/decommissionedNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/startTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveApplications": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveApplications",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/nodeManagers": {
-            "metric": "Hadoop:service=ResourceManager,name=RMNMInfo.LiveNodeManagers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.CallQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedVCores",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsPending": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsPending",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsCompleted": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsCompleted",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveUsers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveUsers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsRunning": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_1440": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_1440",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableVCores",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/activeNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersReleased": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersReleased",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/lostNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingVCores",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "yarn.ClusterMetrics.NumLostNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "yarn.ClusterMetrics.NumActiveNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisCopy": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memMaxM": {
-            "metric": "jvm.JvmMetrics.MemMaxM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountCopy": {
-            "metric": "jvm.JvmMetrics.GcCountCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-            "pointInTime": false,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryMax":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/HeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryMax":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-  

<TRUNCATED>
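
For orientation: the metrics.json content deleted above is Ambari's mapping layer between REST metric paths and their underlying sources. Entries under a "jmx" block resolve against JMX bean attributes (temporal false, point-in-time reads), while "ganglia" entries resolve against the time-series store by dotted metric name (temporal true). The $1.replaceAll(...) expressions rewrite the queue portion captured from the bean name, e.g. QueueMetrics(.+), into a slash-separated path segment. A minimal sketch of one jmx entry, copied from the same shape the hunk above uses:

    "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
      "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
      "pointInTime": true,
      "temporal": false
    }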

[34/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index 793732e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,714 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name               {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['jobtracker'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for JobTracker
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% endif %}
-
-{% if hostgroup_defs['snamenode'] %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['namenode'] %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if env.system.os_family != "suse" %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   10
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-{% endif %}
-
-# MAPREDUCE Checks
-{% if hostgroup_defs['jobtracker'] %}
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobtracker!{{ jtnode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobhistory!{{ jobhistory_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% if env.system.os_family != "suse" %}
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ jtnode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     MAPREDUCE::JobTracker RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobTracker!{{ jtnode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-{% if hostgroup_defs['tasktracker-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     MAPREDUCE::Percent TaskTrackers live
-        servicegroups           MAPREDUCE
-        check_command           check_aggregate!"TASKTRACKER::TaskTracker process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER Checks 
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     TASKTRACKER::TaskTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ tasktracker_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER MapReduce local dir used space
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     TASKTRACKER::MapReduce local dir space
-        servicegroups           MAPREDUCE
-        check_command           check_mapred_local_dir_used_space!{{ mapred_local_dir }}!85%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-# YARN::RESOURCEMANAGER Checks 
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui!resourcemanager!{{ rm_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.os_family != "suse" %}
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['nodemanagers'] %}
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!historyserver2!{{ hs_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.os_family != "suse" %}
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{%  endif %}
-
-{% if hostgroup_defs['journalnodes'] %}
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['slaves'] %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{# HBASE::MASTER Checks
-# define service {
-#         hostgroup_name          hbasemasters
-#         use                     hadoop-service
-#         service_description     HBASEMASTER::HBase Master Web UI
-#         servicegroups           HBASE
-#         check_command           check_webui!hbase!{{ hbase_master_port }}
-#         normal_check_interval   1
-#         retry_check_interval    1
-#         max_check_attempts      3
-# #}
-{%  for hbasemaster in hbase_master_hosts  %}
-{% if env.system.os_family != "suse" %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore status
-        servicegroups           HIVE-METASTORE
-        {% if security_enabled %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
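For reference, once Jinja2 substitutes the cluster parameters, a stanza such as the DataNode process check above renders to plain Nagios syntax along these lines (the port value is illustrative, not taken from this commit):

    define service {
            hostgroup_name          slaves
            use                     hadoop-service
            service_description     DATANODE::DataNode process
            servicegroups           HDFS
            check_command           check_tcp!50010!-w 1 -c 1
            normal_check_interval   1
            retry_check_interval    0.5
            max_check_attempts      3
    }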

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index acb2522..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1349 +0,0 @@
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-# You can specify individual object config files as shown below:
-cfg_file=/etc/nagios/objects/commands.cfg
-cfg_file=/etc/nagios/objects/contacts.cfg
-cfg_file=/etc/nagios/objects/timeperiods.cfg
-cfg_file=/etc/nagios/objects/templates.cfg
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file=/etc/nagios/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file=/etc/nagios/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file=/etc/nagios/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file=/etc/nagios/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir=/etc/nagios/servers
-#cfg_dir=/etc/nagios/printers
-#cfg_dir=/etc/nagios/switches
-#cfg_dir=/etc/nagios/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can reduce the time needed to (re)start
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
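-# Illustration of the workflow described above (flag spellings assumed;
-# verify against your Nagios build):
-#   nagios -pv /etc/nagios/nagios.cfg   # validate and write the precache
-#   nagios -ud /etc/nagios/nagios.cfg   # start, reading the precached file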
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-# restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (i.e. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
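-# Illustration (not part of the stock sample): an external command is a
-# single line written to this file, e.g. forcing one of the Hadoop service
-# checks defined above (host name illustrative):
-#   now=$(date +%s); printf "[%s] SCHEDULE_FORCED_SVC_CHECK;host1;DATANODE::DataNode process;%s\n" \
-#       "$now" "$now" > /var/nagios/rw/nagios.cmd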
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# by loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that  a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is the directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) which check
-# result files are considered to be valid.  Files older than this 
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option.
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc, but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular interval, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.  If you
-# want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ANDs of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
-
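-# Worked example: with interval_length=60, the fractional values used in the
-# generated hadoop-services.cfg translate directly to seconds, e.g.
-# normal_check_interval 0.5 means a check every 30 seconds and
-# retry_check_interval 0.25 means a retry every 15 seconds.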
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host check is or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# process_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have a distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=1
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# to detect hosts and services that are "flapping".
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
-
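-# In short: Nagios tracks a weighted percent-state-change per host/service;
-# flapping begins when that value rises above the high threshold and clears
-# once it falls back below the low threshold.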
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enabled tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=0
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#          0 = Nothing
-#	   1 = Functions
-#          2 = Configuration
-#          4 = Process information
-#	   8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#          128 = External commands
-#          256 = Commands
-#          512 = Scheduled downtime
-#          1024 = Comments
-#          2048 = Macros
-
-debug_level=0
-
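-# Worked example of OR-ing values: debug_level=48 (16 + 32) logs
-# host/service checks together with notifications.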
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-
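Since this commit moves the stack logic to Python, here is a minimal sketch of how a template such as nagios.cfg.j2 can be rendered with the jinja2 library; the parameter values are illustrative, and the real code derives them from the stack's params:

    from jinja2 import Environment, FileSystemLoader

    # Load templates from the package's templates/ directory.
    env = Environment(loader=FileSystemLoader("templates"))
    template = env.get_template("nagios.cfg.j2")

    # Substitute a few of the placeholders seen above (illustrative values).
    rendered = template.render(
        nagios_user="nagios",
        nagios_group="nagios",
        nagios_pid_file="/var/nagios/nagios.pid",
        nagios_resource_cfg="/etc/nagios/resource.cfg",
        nagios_p1_pl="/usr/bin/p1.pl",
        nagios_host_cfg="/etc/nagios/objects/hadoop-hosts.cfg",
        nagios_hostgroup_cfg="/etc/nagios/objects/hadoop-hostgroups.cfg",
        nagios_servicegroup_cfg="/etc/nagios/objects/hadoop-servicegroups.cfg",
        nagios_service_cfg="/etc/nagios/objects/hadoop-services.cfg",
        nagios_command_cfg="/etc/nagios/objects/hadoop-commands.cfg",
    )

    # Write the rendered configuration to its final location.
    with open("/etc/nagios/nagios.cfg", "w") as f:
        f.write(rendered)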

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index d8936a0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
-
-<Directory "/usr/lib/nagios/cgi">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "/usr/share/nagios"
-
-<Directory "/usr/share/nagios">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
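A note on the AuthUserFile referenced above: the htpasswd users file is normally created with Apache's htpasswd utility, e.g. (user name illustrative):

    htpasswd -c /etc/nagios/htpasswd.users nagiosadmin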

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index 01e21ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="/etc/nagios/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?
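Usage sketch for the init script above, once installed as /etc/init.d/nagios (standard service invocations, not part of the commit):

    service nagios configtest   # runs check_config against nagios.cfg
    service nagios restart      # validates the config, then stops and starts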

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 920bfae..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file
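
The $USER1$ macro defined above is expanded by Nagios inside command
definitions (check_command lines such as
"$USER1$/check_ping -H $HOSTADDRESS$ -w 3000.0,80% -c 5000.0,100%").
As a minimal sketch of how a Jinja template like this one gets its value
filled in -- the plugins_dir value below is hypothetical, and Ambari uses
its own Template resource rather than raw jinja2:

    from jinja2 import Template

    # render the $USER1$ line from the template above with a sample path
    template_src = "# Sets $USER1$ to be the path to the plugins\n$USER1$={{plugins_dir}}\n"
    rendered = Template(template_src).render(plugins_dir="/usr/lib64/nagios/plugins")
    print(rendered)  # -> $USER1$=/usr/lib64/nagios/plugins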

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/global.xml
deleted file mode 100644
index ddbf780..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/global.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozieserver_host</name>
-    <value></value>
-    <description>Oozie Server Host.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value></value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_database</name>
-    <value>MySQL</value>
-    <description>Oozie MySQL Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL Host.</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_database</name>
-    <value>Oracle</value>
-    <description>Oracle Database</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_host</name>
-    <value></value>
-    <description>Database Host.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database default.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_host</name>
-    <value></value>
-    <description>Host on which database will be created.</description>
-  </property>
-  <property>
-    <name>oozie_database_name</name>
-    <value>oozie</value>
-    <description>Database name used for Oozie.</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_name</name>
-    <value>oozie</value>
-    <description>Database user name to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-
-</configuration>


[37/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py
deleted file mode 100644
index 3886c33..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import get_unique_id_and_date
-
-def hcat_service_check():
-    import params
-
-    unique = get_unique_id_and_date()
-    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
-    test_cmd = format("fs -test -e {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-    else:
-      kinit_cmd = ""
-
-    File('/tmp/hcatSmoke.sh',
-         content=StaticFile("hcatSmoke.sh"),
-         mode=0755
-    )
-
-    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
-
-    Execute(prepare_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True)
-
-    ExecuteHadoop(test_cmd,
-                  user=params.hdfs_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir)
-
-    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
-
-    Execute(cleanup_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True
-    )
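
The script above implements a three-phase smoke test: prepare a table via
hcatSmoke.sh, verify the warehouse path exists through an HDFS check, then
clean up. A conceptual stand-alone sketch of that pattern, outside the
resource_management DSL (paths and helper names here are illustrative, not
the real API):

    import subprocess
    import time

    def run_with_retries(cmd, tries=3, try_sleep=5):
        # mirrors Execute(tries=..., try_sleep=...): retry on non-zero exit
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))

    unique = str(int(time.time()))  # stand-in for get_unique_id_and_date()
    run_with_retries("sh /tmp/hcatSmoke.sh hcatsmoke%s prepare" % unique)
    run_with_retries("hadoop fs -test -e /apps/hive/warehouse/hcatsmoke%s" % unique, tries=1)
    run_with_retries("sh /tmp/hcatSmoke.sh hcatsmoke%s cleanup" % unique)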

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py
deleted file mode 100644
index 5f03871..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-
-def hive(name=None):
-  import params
-
-  if name == 'metastore' or name == 'hiveserver2':
-    hive_config_dir = params.hive_server_conf_dir
-    config_file_mode = 0600
-    jdbc_connector()
-  else:
-    hive_config_dir = params.hive_conf_dir
-    config_file_mode = 0644
-
-  Directory(hive_config_dir,
-            owner=params.hive_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  XmlConfig("hive-site.xml",
-            conf_dir=hive_config_dir,
-            configurations=params.config['configurations']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=config_file_mode
-  )
-
-  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
-               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
-
-  Execute(cmd,
-          not_if=format("[ -f {check_db_connection_jar_name}]"))
-
-  if name == 'metastore':
-    File(params.start_metastore_path,
-         mode=0755,
-         content=StaticFile('startMetastore.sh')
-    )
-
-  elif name == 'hiveserver2':
-    File(params.start_hiveserver2_path,
-         mode=0755,
-         content=StaticFile('startHiveserver2.sh')
-    )
-
-  if name != "client":
-    crt_directory(params.hive_pid_dir)
-    crt_directory(params.hive_log_dir)
-    crt_directory(params.hive_var_lib)
-
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
-  )
-
-  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
-  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
-
-  log4j_exec_filename = 'hive-exec-log4j.properties'
-  if params.log4j_exec_props is not None:
-    PropertiesFile(log4j_exec_filename,
-                   dir=params.hive_conf_dir,
-                   properties=params.log4j_exec_props,
-                   mode=0664,
-                   owner=params.hive_user,
-                   group=params.user_group
-    )
-  elif (os.path.exists("{params.hive_conf_dir}/{log4j_exec_filename}.template")):
-    File(format("{params.hive_conf_dir}/{log4j_exec_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{params.hive_conf_dir}/{log4j_exec_filename}.template"))
-    )
-
-  log4j_filename = 'hive-log4j.properties'
-  if params.log4j_props is not None:
-    PropertiesFile(log4j_filename,
-                   dir=params.hive_conf_dir,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.hive_user,
-                   group=params.user_group
-    )
-  elif (os.path.exists("{params.hive_conf_dir}/{log4j_filename}.template")):
-    File(format("{params.hive_conf_dir}/{log4j_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{params.hive_conf_dir}/{log4j_filename}.template"))
-    )
-
-
-def crt_directory(name):
-  import params
-
-  Directory(name,
-            recursive=True,
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-
-
-def crt_file(name):
-  import params
-
-  File(name,
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-
-def jdbc_connector():
-  import params
-
-  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
-    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            path=["/bin", "usr/bin/"])
-
-  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-    cmd = format(
-      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
-      "cp {driver_curl_target} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py
deleted file mode 100644
index 0a5fb2b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import sys
-from resource_management import *
-
-from hive import hive
-
-class HiveClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='client')
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py
deleted file mode 100644
index c741174..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveMetastore(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='metastore')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'metastore',
-                   action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'metastore',
-                   action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    # Check the Hive Metastore pid file
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py
deleted file mode 100644
index 3ad81a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveServer(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='hiveserver2')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'hiveserver2',
-                  action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'hiveserver2',
-                  action = 'stop'
-    )
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    # Check the HiveServer2 pid file
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py
deleted file mode 100644
index e8d4e5c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hive_service(
-    name,
-    action='start'):
-
-  import params
-
-  if name == 'metastore':
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    cmd = format(
-      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
-  elif name == 'hiveserver2':
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    cmd = format(
-      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
-
-  if action == 'start':
-    demon_cmd = format("{cmd}")
-    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(demon_cmd,
-            user=params.hive_user,
-            not_if=no_op_test
-    )
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format(
-        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
-      Execute(db_connection_check_command,
-              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
-
-  elif action == 'stop':
-    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
-    Execute(demon_cmd)
-
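
The no_op_test above is a pid-file liveness check: the start command is a
no-op when the pid file exists and the recorded process is still alive. A
minimal sketch of the same check in plain Python (the pid path below is
hypothetical):

    import os

    def daemon_is_running(pid_file):
        # True only if the pid file exists AND its process is alive
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 checks existence without signaling
            return True
        except (IOError, ValueError, OSError):
            return False

    if not daemon_is_running("/var/run/hive/hive-server.pid"):
        pass  # launch the daemon here, mirroring Execute(..., not_if=no_op_test)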

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py
deleted file mode 100644
index 8567311..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from mysql_service import mysql_service
-
-class MysqlServer(Script):
-
-  if System.get_instance().os_family == "suse":
-    daemon_name = 'mysql'
-  else:
-    daemon_name = 'mysqld'
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action='start')
-
-    File(params.mysql_adduser_path,
-         mode=0755,
-         content=StaticFile('addMysqlUser.sh')
-    )
-
-    # Passing the command as a tuple lets Execute escape arguments automatically
-    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
-           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd) , params.mysql_host[0])
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-    mysql_service(daemon_name=self.daemon_name, action='stop')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'stop')
-
-  def status(self, env):
-    mysql_service(daemon_name=self.daemon_name, action = 'status')
-
-if __name__ == "__main__":
-  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py
deleted file mode 100644
index cfb3e08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def mysql_service(daemon_name=None, action='start'):
-  cmd = format('service {daemon_name} {action}')
-
-  if action == 'status':
-    logoutput = False
-  else:
-    logoutput = True
-
-  Execute(cmd,
-          path="/usr/local/bin/:/bin/:/sbin/",
-          tries=1,
-          logoutput=logoutput)
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py
deleted file mode 100644
index 734d3ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_server_conf_dir = "/etc/hive/conf.server"
-hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
-
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-
-#users
-hive_user = config['configurations']['global']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
-#JDBC driver jar name
-hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
-if hive_jdbc_driver == "com.mysql.jdbc.Driver":
-  jdbc_jar_name = "mysql-connector-java.jar"
-elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-  jdbc_jar_name = "ojdbc6.jar"
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-#common
-hive_metastore_port = config['configurations']['global']['hive_metastore_port']
-hive_var_lib = '/var/lib/hive'
-hive_server_host = config['clusterHostInfo']['hive_server_host']
-hive_url = format("jdbc:hive2://{hive_server_host}:10000")
-
-smokeuser = config['configurations']['global']['smokeuser']
-smoke_test_sql = "/tmp/hiveserver2.sql"
-smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-
-#hive_env
-hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
-hive_pid_dir = status_params.hive_pid_dir
-hive_pid = status_params.hive_pid
-
-#hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
-
-#Starting hiveserver2
-start_hiveserver2_script = 'startHiveserver2.sh'
-
-hadoop_home = '/usr'
-
-##Starting metastore
-start_metastore_script = 'startMetastore.sh'
-hive_metastore_pid = status_params.hive_metastore_pid
-java_share_dir = '/usr/share/java'
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
-
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
-artifact_dir = "/tmp/HDP-artifacts/"
-
-target = format("{hive_lib}/{jdbc_jar_name}")
-
-jdk_location = config['hostLevelParams']['jdk_location']
-driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
-
-start_hiveserver2_path = "/tmp/start_hiveserver2_script"
-start_metastore_path = "/tmp/start_metastore_script"
-
-hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-java64_home = config['hostLevelParams']['java_home']
-
-##### MYSQL
-
-db_name = config['configurations']['global']['hive_database_name']
-mysql_user = "mysql"
-mysql_group = 'mysql'
-mysql_host = config['clusterHostInfo']['hive_mysql_host']
-
-mysql_adduser_path = "/tmp/addMysqlUser.sh"
-
-########## HCAT
-
-hcat_conf_dir = '/etc/hcatalog/conf'
-
-metastore_port = 9933
-hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-
-hcat_dbroot = hcat_lib
-
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
-
-hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']
-
-hadoop_conf_dir = '/etc/hadoop/conf'
-
-#hive-log4j.properties.template
-if 'hive-log4j' in config['configurations']:
-  log4j_props = config['configurations']['hive-log4j']
-else:
-  log4j_props = None
-
-#hive-exec-log4j.properties.template
-if 'hive-exec-log4j' in config['configurations']:
-  log4j_exec_props = config['configurations']['hive-exec-log4j']
-else:
-  log4j_exec_props = None
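
params.py resolves most values straight out of the command JSON and falls
back via default() when a key is absent. A minimal stand-in for that
lookup-with-fallback idea -- not the real resource_management helper, just
an illustration:

    config = {'configurations': {'global': {'hive_user': 'hive'}}}

    def default(key, fallback):
        # simplified: the real helper can walk a nested path into config
        return config.get(key, fallback)

    hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
    assert hive_jdbc_driver == 'com.mysql.jdbc.Driver'  # absent key -> fallback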

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py
deleted file mode 100644
index 111e8a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-from hcat_service_check import hcat_service_check
-
-class HiveServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
-      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
-      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
-    else:
-      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
-
-    File(params.smoke_test_path,
-         content=StaticFile('hiveserver2Smoke.sh'),
-         mode=0755
-    )
-
-    File(params.smoke_test_sql,
-         content=StaticFile('hiveserver2.sql')
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True,
-            user=params.smokeuser)
-
-    hcat_service_check()
-
-if __name__ == "__main__":
-  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py
deleted file mode 100644
index 7770975..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
-hive_pid = 'hive-server.pid'
-
-hive_metastore_pid = 'hive.pid'
-
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2
deleted file mode 100644
index 2a35240..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HCAT_PID_DIR={{hcat_pid_dir}}/
-HCAT_LOG_DIR={{hcat_log_dir}}/
-HCAT_CONF_DIR={{hcat_conf_dir}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT={{hcat_dbroot}}
-USER={{hcat_user}}
-METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2
deleted file mode 100644
index 548262a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the JVM started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-# export HIVE_AUX_JARS_PATH=
-export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/global.xml
deleted file mode 100644
index c49480f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/global.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hue_pid_dir</name>
-    <value>/var/run/hue</value>
-    <description>Hue Pid Dir.</description>
-  </property>
-  <property>
-    <name>hue_log_dir</name>
-    <value>/var/log/hue</value>
-    <description>Hue Log Dir.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/hue-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/hue-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/hue-site.xml
deleted file mode 100644
index 6eb52a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/configuration/hue-site.xml
+++ /dev/null
@@ -1,290 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-  <!-- General Hue server configuration properties -->
-  <property>
-      <name>send_debug_messages</name>
-      <value>1</value>
-      <description>Set to 1 to send debug messages.</description>
-  </property>
-
-  <property>
-    <name>database_logging</name>
-    <value>0</value>
-    <description>To show database transactions, set database_logging to 1.
-      By default, database_logging=0.</description>
-  </property>
-
-  <property>
-    <name>secret_key</name>
-    <value></value>
-    <description>This is used for secure hashing in the session store.</description>
-  </property>
-
-  <property>
-    <name>http_host</name>
-    <value>0.0.0.0</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>http_port</name>
-    <value>8000</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>time_zone</name>
-    <value>America/Los_Angeles</value>
-    <description>Time zone name</description>
-  </property>
-
-  <property>
-    <name>django_debug_mode</name>
-    <value>1</value>
-    <description>Django debug mode; set to 0 to turn off debug.</description>
-  </property>
-
-  <property>
-    <name>use_cherrypy_server</name>
-    <value>false</value>
-    <description>Set to true to use CherryPy as the webserver, set to false
-      to use Spawning as the webserver. Defaults to Spawning if
-      key is not specified.</description>
-  </property>
-
-  <property>
-    <name>http_500_debug_mode</name>
-    <value>1</value>
-    <description>Backtrace for server errors; set to 0 to turn off.</description>
-  </property>
-
-  <property>
-    <name>server_user</name>
-    <value></value>
-    <description>Webserver runs as this user</description>
-  </property>
-
-  <property>
-    <name>server_group</name>
-    <value></value>
-    <description>Webserver runs as this group</description>
-  </property>
-
-  <property>
-    <name>backend_auth_policy</name>
-    <value>desktop.auth.backend.AllowAllBackend</value>
-    <description>Authentication backend.</description>
-  </property>
-
-  <!-- Hue Database configuration properties -->
-  <property>
-    <name>db_engine</name>
-    <value>mysql</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_host</name>
-    <value>localhost</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_port</name>
-    <value>3306</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_user</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_password</name>
-    <value>1111</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_name</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <!-- Hue Email configuration properties -->
-  <property>
-    <name>smtp_host</name>
-    <value>localhost</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_port</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_user</name>
-    <value></value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_password</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>tls</name>
-    <value>no</value>
-    <description>Whether to use a TLS (secure) connection when talking to the SMTP server.</description>
-  </property>
-
-  <property>
-    <name>default_from_email</name>
-    <value>sandbox@hortonworks.com</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <!-- Hue Hadoop configuration properties -->
-  <property>
-    <name>fs_defaultfs</name>
-    <value></value>
-    <description>Enter the filesystem URI,
-      e.g. hdfs://sandbox:8020</description>
-  </property>
-
-  <property>
-    <name>webhdfs_url</name>
-    <value></value>
-    <description>Use WebHdfs/HttpFs as the communication mechanism. To fall back to
-      using the Thrift plugin (used in Hue 1.x), this must be uncommented
-      and explicitly set to the empty value.
-      Value e.g.: http://localhost:50070/webhdfs/v1/</description>
-  </property>
-
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the Hadoop JobTracker.</description>
-  </property>
-
-  <property>
-    <name>jobtracker_port</name>
-    <value>50030</value>
-    <description>The port where the JobTracker IPC listens on.</description>
-  </property>
-
-  <property>
-    <name>hadoop_mapred_home</name>
-    <value>/usr/lib/hadoop/lib</value>
-    <description>The path to the Hadoop MapReduce libraries.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the ResourceManager.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_port</name>
-    <value></value>
-    <description>The port where the ResourceManager IPC listens on.</description>
-  </property>
-
-  <!-- Hue Beeswax configuration properties -->
-  <property>
-    <name>hive_home_dir</name>
-    <value></value>
-    <description>Hive home directory.</description>
-  </property>
-
-  <property>
-    <name>hive_conf_dir</name>
-    <value></value>
-    <description>Hive configuration directory, where hive-site.xml is
-      located.</description>
-  </property>
-
-  <property>
-    <name>templeton_url</name>
-    <value></value>
-    <description>WebHcat http URL</description>
-  </property>
-
-  <!-- Hue shell types configuration -->
-  <property>
-    <name>pig_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type pig</description>
-  </property>
-
-  <property>
-    <name>pig_shell_command</name>
-    <value>/usr/bin/pig -l /dev/null</value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>pig_java_home</name>
-    <value></value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>hbase_nice_name</name>
-    <value>HBase Shell</value>
-    <description>Define and configure a new shell type hbase</description>
-  </property>
-
-  <property>
-    <name>hbase_shell_command</name>
-    <value>/usr/bin/hbase shell</value>
-    <description>Define and configure a new shell type hbase.</description>
-  </property>
-
-  <property>
-    <name>bash_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type bash for testing
-      only</description>
-  </property>
-
-  <property>
-    <name>bash_shell_command</name>
-    <value>/bin/bash</value>
-    <description>Define and configure a new shell type bash for testing only.</description>
-  </property>
-
-  <!-- Hue Settings for the User Admin application -->
-  <property>
-    <name>whitelist</name>
-    <value>(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)</value>
-    <description>proxy settings</description>
-  </property>
-
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml
deleted file mode 100644
index 0a6b59e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Hue is a graphical user interface to operate and develop
-      applications for Apache Hadoop.</comment>
-    <version>2.2.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>HUE_SERVER</name>
-            <category>MASTER</category>
-            <cardinality>1</cardinality>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention, such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized
-     concurrently by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-      maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-      This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-      The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-      The default value of -1 implies a queue can use the complete capacity of the cluster.
-
-      This property can be used to curtail long-running jobs from occupying more than a
-      certain percentage of the cluster, which, in the absence of pre-emption, could affect
-      the capacity guarantees of other queues.
-
-      Note that maximum-capacity is a percentage, so it scales with the cluster's capacity:
-      if a large number of nodes or racks is added to the cluster, the maximum capacity in
-      absolute terms increases accordingly.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the 
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of 
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds used to poll 
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues. If this number equals
-    the number of job queues, each thread initializes jobs in one queue.
-    If it is smaller, each thread is assigned a set of queues. If it is
-    greater, the number of threads is capped at the number of job
-    queues.
-    </description>
-  </property>
-
-</configuration>
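
The minimum-user-limit-percent description above walks through the per-user
share arithmetic (value 25: two users get 50% each, three get 33%, four or
more get 25%). A worked sketch of that rule, not part of this patch:

    # Each of N competing users gets 1/N of the queue, but never less than
    # the configured minimum percentage; 100 means no per-user limit.
    def max_user_share(num_users, min_user_limit_percent):
        return max(100.0 / num_users, float(min_user_limit_percent))

    for users in (1, 2, 3, 4):
        # With the value 25 from the description: 100.0, 50.0, 33.3..., 25.0
        print(users, max_user_share(users, 25))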

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/global.xml
deleted file mode 100644
index 4633855..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>JobTracker Host.</description>
-  </property>
-  <property>
-    <name>tasktracker_hosts</name>
-    <value></value>
-    <description>TaskTracker hosts.</description>
-  </property>
-  <property>
-    <name>mapred_local_dir</name>
-    <value>/hadoop/mapred</value>
-    <description>MapRed Local Directories.</description>
-  </property>
-  <property>
-    <name>mapred_system_dir</name>
-    <value>/mapred/system</value>
-    <description>MapRed System Directories.</description>
-  </property>
-  <property>
-    <name>scheduler_name</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-    <description>MapRed Capacity Scheduler.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_newsize</name>
-    <value>200</value>
-    <description>Mem New Size.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>Max New size.</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>jtnode_heapsize</name>
-    <value>1024</value>
-    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
-  </property>
-  <property>
-    <name>mapred_map_tasks_max</name>
-    <value>4</value>
-    <description>Number of slots that simultaneously running Map tasks can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_red_tasks_max</name>
-    <value>2</value>
-    <description>Number of slots that simultaneously running Reduce tasks can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_cluster_map_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_cluster_red_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_job_map_mem_mb</name>
-    <value>-1</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-  <property>
-    <name>mapred_child_java_opts_sz</name>
-    <value>768</value>
-    <description>Java options for the TaskTracker child processes.</description>
-  </property>
-  <property>
-    <name>io_sort_mb</name>
-    <value>200</value>
-    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>io_sort_spill_percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>mapreduce_userlog_retainhours</name>
-    <value>24</value>
-    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
-  </property>
-  <property>
-    <name>maxtasks_per_job</name>
-    <value>-1</value>
-    <description>Maximum number of tasks for a single Job</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  <property>
-    <name>snappy_enabled</name>
-    <value>true</value>
-    <description>Snappy compression enabled</description>
-  </property>
-  <property>
-    <name>rca_enabled</name>
-    <value>true</value>
-    <description>Enable Job Diagnostics.</description>
-  </property>
-  <property>
-    <name>mapred_hosts_exclude</name>
-    <value></value>
-    <description>Exclude entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_hosts_include</name>
-    <value></value>
-    <description>Include entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_jobstatus_dir</name>
-    <value>/mapred/jobstatus</value>
-    <description>Job Status directory</description>
-  </property>
-  <property>
-    <name>task_controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>Task Controller.</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>MapReduce User.</description>
-  </property>
-
-</configuration>
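
Several of the global.xml values above (jtnode_opt_newsize, jtnode_heapsize,
and friends) exist only to be spliced into JVM flags; compare
HADOOP_JOBTRACKER_OPTS in the hadoop-env.sh.j2 template added later in this
patch. A small illustrative sketch, not part of this patch:

    # Values as defined in the global.xml above (sizes are in MB).
    params = {"jtnode_opt_newsize": "200",
              "jtnode_opt_maxnewsize": "200",
              "jtnode_heapsize": "1024"}

    # Rendered the way hadoop-env.sh.j2 uses them in HADOOP_JOBTRACKER_OPTS.
    opts = ("-XX:NewSize={jtnode_opt_newsize}m "
            "-XX:MaxNewSize={jtnode_opt_maxnewsize}m "
            "-Xmx{jtnode_heapsize}m").format(**params)
    print(opts)  # -XX:NewSize=200m -XX:MaxNewSize=200m -Xmx1024m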

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index 9389ed0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-    <description> Comma separated list of user and group names that are allowed
-      to submit jobs to the 'default' queue. The user list and the group list
-      are separated by a blank, e.g. alice,bob group1,group2.
-      If set to the special value '*', it means all users are allowed to
-      submit jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-    <description> Comma separated list of user and group names that are allowed
-      to delete jobs or modify job's priority for jobs not owned by the current
-      user in the 'default' queue. The user list and the group list
-      are separated by a blank, e.g. alice,bob group1,group2.
-      If set to the special value '*', it means all users are allowed to do
-      this operation.
-    </description>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
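
The ACL format described above is "user list, a blank, group list", with '*'
as the special value that allows everyone. A minimal parsing sketch, not
part of this patch:

    def acl_allows(acl_value, user, user_groups):
        # '*' is the special value that allows all users.
        if acl_value.strip() == "*":
            return True
        # e.g. "alice,bob group1,group2": users before the blank, groups after.
        parts = acl_value.split(None, 1)
        users = parts[0].split(",") if parts else []
        groups = parts[1].split(",") if len(parts) > 1 else []
        return user in users or any(g in groups for g in user_groups)

    print(acl_allows("alice,bob group1,group2", "alice", []))          # True
    print(acl_allows("alice,bob group1,group2", "carol", ["group2"]))  # True
    print(acl_allows("alice,bob group1,group2", "carol", ["ops"]))     # False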


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+      echo "ERROR: NameNode directory(s) not empty. Will not format the NameNode. Non-empty NameNode dirs:${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
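
The script above expects, in order: the HDFS user, the Hadoop conf dir, the
marker directory, and then the NameNode name dirs. A hedged sketch of how a
service script in this patch might invoke it; the staged path and all
argument values here are illustrative assumptions, not taken from the diff:

    from resource_management import Execute

    # Args: hdfs_user conf_dir mark_dir name_dirs... (paths are assumptions)
    Execute("sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf "
            "/var/run/hadoop/hdfs/namenode/formatted /hadoop/hdfs/namenode",
            path=["/usr/bin", "/bin"],
            logoutput=True)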

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..c8939fc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,132 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..e11bfac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+#TODO: this must become the "CONFIGURE" hook once the CONFIGURE command is implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_java()
+    setup_hadoop()
+    setup_configs()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..1f943df
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -0,0 +1,197 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#java params
+artifact_dir = "/tmp/HDP-artifacts/"
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#users and groups
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+yarn_user = config['configurations']['global']['yarn_user']
+
+user_group = config['configurations']['global']['user_group']
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#snmp
+snmp_conf_dir = "/etc/snmp/"
+snmp_source = "0.0.0.0/0"
+snmp_community = "hadoop"
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(nagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+#hadoop params
+hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+hadoop_lib_home = "/usr/lib/hadoop/lib"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_home = "/usr"
+hadoop_bin = "/usr/lib/hadoop/sbin"
+
+task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+limits_conf_dir = "/etc/security/limits.d"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+rca_enabled = config['configurations']['global']['rca_enabled']
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+if System.get_instance().os_family == "suse":
+  jsvc_path = "/usr/lib/bigtop-utils"
+else:
+  jsvc_path = "/usr/libexec/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['global']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("jtnode_heapsize","1024m")
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+#hdfs ha properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+namenode_id = None
+if dfs_ha_enabled:
+  # Find which HA namenode id (if any) maps to this host.
+  for nn_id in dfs_ha_namenode_ids.split(","):
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+rca_property_map = {
+  'ambari.jobhistory.database': ambari_db_rca_url,
+  'ambari.jobhistory.driver': ambari_db_rca_driver,
+  'ambari.jobhistory.user': ambari_db_rca_username,
+  'ambari.jobhistory.password': ambari_db_rca_password,
+  'ambari.jobhistory.logger': 'DEBUG,JHA',
+
+  'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
+  'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
+  'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
+  'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
+  'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
+
+  'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
+  'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
+}
+
+if ('hdfs-log4j' in config['configurations']):
+  log4j_props = config['configurations']['hdfs-log4j']
+  if 'yarn-log4j' in config['configurations']:
+    log4j_props.update(config['configurations']['yarn-log4j'])
+else:
+  log4j_props = None
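
Most lookups above go through default("/clusterHostInfo/...", fallback),
which tolerates missing keys in the command JSON. A rough standalone sketch
of that behavior, not the real resource_management implementation (which
reads the config implicitly rather than taking it as an argument):

    def default(config, path, fallback):
        # Walk a '/'-separated path through nested dicts; fall back if any
        # key along the way is missing.
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cmd_json = {"clusterHostInfo": {"rm_host": ["rm1.example.com"]}}
    print(default(cmd_json, "/clusterHostInfo/rm_host", []))  # ['rm1.example.com']
    print(default(cmd_json, "/clusterHostInfo/hs_host", []))  # []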

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..2f2ca8b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,322 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_java():
+  """
+  Installs the JDK using params that come from ambari-server
+  """
+  import params
+
+  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
+  java_dir = os.path.dirname(params.java_home)
+  java_exec = format("{java_home}/bin/java")
+  
+  if not params.jdk_name:
+    return
+  
+  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}"))
+
+  if params.jdk_name.endswith(".bin"):
+    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
+  elif params.jdk_name.endswith(".gz"):
+    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+  else:
+    # Fail fast instead of hitting a NameError on an undefined install_cmd.
+    raise Fail("Unrecognized JDK archive type: %s" % params.jdk_name)
+  
+  Execute(install_cmd,
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}")
+  )
+  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
+  Execute( download_jce,
+        path = ["/bin","/usr/bin/"],
+        not_if =format("test -e {jce_curl_target}"),
+        ignore_failures = True
+  )
+  
+  if params.security_enabled:
+    security_dir = format("{java_home}/jre/lib/security")
+    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
+    Execute(extract_cmd,
+          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+          cwd  = security_dir,
+          path = ['/bin/','/usr/bin']
+    )
+
+def setup_hadoop():
+  """
+  Sets up hadoop files and directories
+  """
+  import params
+
+  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
+       content=Template("snmpd.conf.j2"))
+  Service("snmpd",
+          action = "restart")
+
+  Execute("/bin/echo 0 > /selinux/enforce",
+          only_if="test -f /selinux/enforce"
+  )
+
+  install_snappy()
+
+  #directories
+  Directory(params.hadoop_conf_dir,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hdfs_log_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hadoop_pid_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  #this isn't needed with stack 1
+  Directory(os.path.dirname(params.hadoop_tmp_dir),
+            recursive=True,
+            owner=params.hdfs_user,
+            )
+  #files
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  if tc_mode:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
+    File(os.path.join(params.hadoop_conf_dir, file),
+         owner=tc_owner,
+         content=Template(file + ".j2")
+    )
+
+  health_check_template = "health_check-v2" #for stack 1 use 'health_check'
+  File(os.path.join(params.hadoop_conf_dir, "health_check"),
+       owner=tc_owner,
+       content=Template(health_check_template + ".j2")
+  )
+
+  log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+  if params.log4j_props is not None:
+    PropertiesFile(log4j_filename,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hdfs_user,
+                   group=params.user_group,
+    )
+  elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+    File(log4j_filename,
+         mode=0644,
+         group=params.user_group,
+         owner=params.hdfs_user,
+    )
+
+  update_log4j_props(log4j_filename)
+
+  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+       owner=params.hdfs_user,
+       content=Template("hadoop-metrics2.properties.j2")
+  )
+
+  db_driver_dload_cmd = ""
+  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+
+  if db_driver_dload_cmd:
+    Execute(db_driver_dload_cmd,
+            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
+    )
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services
+  """
+  import params
+
+  if "mapred-queue-acls" in params.config['configurations']:
+    XmlConfig("mapred-queue-acls.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'mapred-queue-acls'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+  elif os.path.exists(
+      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
+    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  File(params.task_log4j_properties_location,
+       content=StaticFile("task-log4j.properties"),
+       mode=0755
+  )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  # if params.stack_version[0] == "1":
+  #   Link('/usr/lib/hadoop/hadoop-tools.jar',
+  #         to = '/usr/lib/hadoop/lib/hadoop-tools.jar',
+  #         mode = 0755
+  #   )
+
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
+    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+    File(os.path.join(params.hadoop_conf_dir, 'masters'),
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  generate_include_file()
+
+def update_log4j_props(file):
+  import params
+
+  for key in params.rca_property_map:
+    value = params.rca_property_map[key]
+    Execute(format(
+      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}")
+    )
+
+
+def generate_include_file():
+  import params
+
+  if params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+
+def install_snappy():
+  import params
+
+  snappy_so = "libsnappy.so"
+  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
+  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
+  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
+  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
+  so_src_dir_x86 = format("{hadoop_home}/lib")
+  so_src_dir_x64 = format("{hadoop_home}/lib64")
+  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
+  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
+  Execute(
+    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
+  Execute(
+    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
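
update_log4j_props() above rewrites each RCA property line in
log4j.properties with sed, adding or stripping the "###" disabled prefix
depending on rca_enabled. The same edit in pure Python, as an illustrative
sketch (not part of this patch):

    import re

    def toggle_rca_line(line, key, value, rca_prefix, disabled_prefix="###"):
        # Replace "<key>=..." or "###<key>=..." with "<rca_prefix><key>=<value>".
        pattern = r"^(?:%s)?%s=.*$" % (re.escape(disabled_prefix), re.escape(key))
        return re.sub(pattern, "%s%s=%s" % (rca_prefix, key, value), line)

    line = "###ambari.jobhistory.logger=DEBUG,JHA"
    # With RCA enabled the prefix is "" and the line is re-enabled:
    print(toggle_rca_line(line, "ambari.jobhistory.logger", "DEBUG,JHA", ""))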

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..77e458f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..bb5795b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
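
In the hooks this template is rendered through resource_management's
Template resource, which supplies params implicitly. A standalone rendering
sketch using jinja2 directly (an assumption for illustration, not how the
patch wires it up):

    import jinja2

    template = jinja2.Template(
        "{% for host in hdfs_exclude_file %}{{host}}\n{% endfor %}")
    print(template.render(hdfs_exclude_file=["dn1.example.com",
                                             "dn2.example.com"]))
    # dn1.example.com
    # dn2.example.com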

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-env.sh.j2
new file mode 100644
index 0000000..1f596a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-env.sh.j2
@@ -0,0 +1,125 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64

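The placeholders above ({{java_home}}, {{hadoop_conf_dir}}, and the rest) are filled in by the stack's Python hooks at deploy time. A minimal sketch of that rendering step, using plain jinja2 and made-up values for a few of the parameters; any variable left out renders as an empty string under jinja2's default Undefined:

    from jinja2 import Template

    with open("hadoop-env.sh.j2") as f:
        tmpl = Template(f.read())

    # Hypothetical values; in Ambari these come from the hook's params.
    print(tmpl.render(
        java_home="/usr/jdk64/jdk1.6.0_31",
        hadoop_conf_dir="/etc/hadoop/conf",
        hadoop_heapsize="1024",
        namenode_heapsize="1024m",
        hdfs_log_dir_prefix="/var/log/hadoop",
    ))
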
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..a6a66ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}

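Everything between {% if has_ganglia_server %} and {% endif %} disappears when the cluster has no Ganglia server, leaving only the license header. A quick sketch of both cases (the hostname is hypothetical):

    from jinja2 import Template

    src = open("hadoop-metrics2.properties.j2").read()
    with_ganglia = Template(src).render(has_ganglia_server=True,
                                        ganglia_server_host="ganglia.example.com")
    without = Template(src).render(has_ganglia_server=False)
    assert "namenode.sink.ganglia.servers=ganglia.example.com:8661" in with_ganglia
    assert "sink.ganglia" not in without
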
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hdfs.conf.j2
new file mode 100644
index 0000000..ca7baa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hdfs.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile 32768
+{{hdfs_user}}   - nproc  65536

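The two lines above land in a security limits file and raise the open-file and process caps for the HDFS user. A quick way to confirm they took effect, run as that user after a fresh login (stdlib only):

    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print("nofile soft=%d hard=%d" % (soft, hard))  # expect >= 32768
    soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    print("nproc  soft=%d hard=%d" % (soft, hard))  # expect >= 65536
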
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check-v2.j2
new file mode 100644
index 0000000..cb7b12b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check-v2.j2
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

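Note that check_disks exits inside a command substitution, so only the subshell's status reaches the `$?` test and the top-level script always exits 0; callers must look for the ERROR prefix on stdout rather than the exit code. A sketch of driving the check from Python (2.6-style, matching the scripts in this commit; the installed path is an assumption), which applies equally to the longer health_check variant below:

    import subprocess

    proc = subprocess.Popen(["/etc/hadoop/conf/health_check"],
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    # Failures show up as "ERROR ...", successes as "OK: ...".
    if out.startswith("ERROR"):
        raise RuntimeError("host health check failed: %s" % out.strip())
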
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..b84b336
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:{{tasktracker_port}}/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/slaves.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/slaves.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

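include_hosts_list.j2 and slaves.j2 above are the same two-line loop; rendered, they emit one slave host per line. A sketch (trim_blocks=True just keeps the block tags from leaving blank lines; the hostnames are made up):

    from jinja2 import Template

    tmpl = Template(open("slaves.j2").read(), trim_blocks=True)
    print(tmpl.render(slave_hosts=["dn1.example.com", "dn2.example.com"]))
    # dn1.example.com
    # dn2.example.com
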
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/snmpd.conf.j2
new file mode 100644
index 0000000..3530444
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/snmpd.conf.j2
@@ -0,0 +1,48 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
+group   notConfigGroup v1           notConfigUser
+group   notConfigGroup v2c           notConfigUser
+view    systemview    included   .1
+access  notConfigGroup ""      any       noauth    exact  systemview none none
+
+syslocation Hadoop 
+syscontact HadoopMaster 
+dontLogTCPWrappersConnects yes
+
+###############################################################################
+# disk checks
+
+disk / 10000
+
+
+###############################################################################
+# load average checks
+#
+
+# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
+#
+# 1MAX:   If the 1 minute load average is above this limit at query
+#         time, the errorFlag will be set.
+# 5MAX:   Similar, but for 5 min average.
+# 15MAX:  Similar, but for 15 min average.
+
+# Check for loads:
+#load 12 14 14
+

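The com2sec/group/view/access stanza above opens a read-only SNMP v1/v2c community, which is what the health check's check_link probes with snmpwalk. The same probe from Python via the net-snmp CLI (the 'public' community and localhost target mirror check_link above, not this template's rendered values):

    import subprocess

    rc = subprocess.call(["/usr/bin/snmpwalk", "-t", "5", "-Os",
                          "-v", "1", "-c", "public", "localhost", "system"])
    if rc != 0:
        print("snmpd is not answering on localhost")
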
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/taskcontroller.cfg.j2
new file mode 100644
index 0000000..d01d37e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/taskcontroller.cfg.j2
@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

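taskcontroller.cfg above backs the setuid task-controller binary that health_check's check_taskcontroller verifies (mode 6050, owner root, group hadoop). The same check in stdlib Python; the binary path is an assumption:

    import grp, os, pwd, stat

    st = os.stat("/usr/lib/hadoop/bin/task-controller")
    mode = stat.S_IMODE(st.st_mode)
    owner = pwd.getpwuid(st.st_uid).pw_name
    group = grp.getgrgid(st.st_gid).gr_name
    assert mode == 0o6050 and owner == "root" and group == "hadoop"
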
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/global.xml
deleted file mode 100644
index 18eae57..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/global.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description></description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description></description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <description></description>
-  </property>
-  <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <description></description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Location of rrd files.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
index 3e6d37a..d69441b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
@@ -16,25 +16,93 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GANGLIA</name>
+      <comment>Ganglia Metrics Collection system</comment>
+      <version>3.5.0</version>
+      <components>
         <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
+          <name>GANGLIA_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/ganglia_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
+          <name>GANGLIA_MONITOR</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/ganglia_monitor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>libganglia-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmetad-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-web-3.5.7-99.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>python-rrdtool.x86_64</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-modules-python-3.5.0-99</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>apache2</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>apache2-mod_php5</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>httpd</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>httpd</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
 </metainfo>

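The metainfo now follows the schemaVersion 2.0 layout, where each component carries its own Python command script. Pulling that mapping out with the standard library (filename assumed local):

    import xml.etree.ElementTree as ET

    root = ET.parse("metainfo.xml").getroot()
    for component in root.findall(".//component"):
        print("%s -> %s" % (component.findtext("name"),
                            component.findtext("commandScript/script")))
    # GANGLIA_SERVER -> scripts/ganglia_server.py
    # GANGLIA_MONITOR -> scripts/ganglia_monitor.py
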
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh
new file mode 100644
index 0000000..e60eb31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh
new file mode 100644
index 0000000..0cec8dc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh
new file mode 100644
index 0000000..d94db5d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

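checkGmetad.sh, checkGmond.sh, and checkRrdcached.sh above all reduce to the same liveness test: read the logged PID and probe the process. A sketch of the equivalent in Python (the pid path matches the ganglia_runtime_dir default of /var/run/ganglia/hdp; signal 0 only tests existence):

    import os

    def is_running(pid_file):
        try:
            pid = int(open(pid_file).read().strip())
            os.kill(pid, 0)  # signal 0 probes the PID without touching it
            return True
        except (IOError, ValueError, OSError):
            return False

    print(is_running("/var/run/ganglia/hdp/gmetad.pid"))
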
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init
new file mode 100644
index 0000000..20b388e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmetad startup script
+# processname: hdp-gmetad
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh
new file mode 100644
index 0000000..e28610e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh
@@ -0,0 +1,204 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port addreses2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed.
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value is less
+# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# Old Default RRA: Keep 1 hour of metrics at 15 second resolution, 1 day at 6 minute resolution.
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+# New Default RRA
+# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
+# Two weeks of data points at 1 minute resolution (average)
+#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
+# Retaining existing resolution
+RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+     "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a case
+# sensitive manner
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}

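generateGmetadConf's middle loop turns each configured Ganglia cluster into a data_source line. The transformation, sketched with made-up cluster tuples of the shape getGangliaClusterInfo emits:

    clusters = [("HDPNameNode", "10.0.0.10", 8661),
                ("HDPSlaves", "10.0.0.10", 8660)]
    for name, ip, port in clusters:
        print('data_source "%s" %s:%d' % (name, ip, port))
    # data_source "HDPNameNode" 10.0.0.10:8661
    # data_source "HDPSlaves" 10.0.0.10:8660
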
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init
new file mode 100644
index 0000000..afb7026
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmond startup script
+# processname: hdp-gmond
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

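Both init scripts follow the stock chkconfig contract (start/stop/restart/status with the usual exit codes), so the daemons can be driven uniformly from Python, e.g. from an agent script (2.6-style):

    import subprocess

    for action in ("start", "status"):
        rc = subprocess.call(["/etc/init.d/hdp-gmond", action])
        print("hdp-gmond %s -> exit %d" % (action, rc))
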

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapreduce-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapreduce-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapreduce-log4j.xml
new file mode 100644
index 0000000..09c9c4f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapreduce-log4j.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hadoop.mapreduce.jobsummary.logger</name>
+    <value>${hadoop.root.logger}</value>
+  </property>
+
+  <property>
+    <name>hadoop.mapreduce.jobsummary.log.file</name>
+    <value>hadoop-mapreduce.jobsummary.log</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.JSA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.JSA.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.JSA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.JSA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+
+  <!--Only for HDP1.0 below-->
+  <property>
+    <name>log4j.appender.JSA.File</name>
+    <value>/var/log/hadoop/mapred/${hadoop.mapreduce.jobsummary.log.file}</value>
+  </property>
+
+  <property>
+    <name>log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary</name>
+    <value>${hadoop.mapreduce.jobsummary.logger}</value>
+  </property>
+
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary</name>
+    <value>false</value>
+  </property>
+
+  <!--Only for HDP1.0 above-->
+
+</configuration>

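Config types like the one above are flat name/value property lists; flattening one back into log4j key=value form takes a few lines of stdlib Python (filename assumed local):

    import xml.etree.ElementTree as ET

    root = ET.parse("mapreduce-log4j.xml").getroot()
    for prop in root.findall("property"):
        print("%s=%s" % (prop.findtext("name"), prop.findtext("value")))
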
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
index 175d68c..643b64c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
@@ -15,30 +15,89 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.2.0.1.3.2.0</version>
 
-    <components>
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <comment>Apache Hadoop Distributed Processing Framework</comment>
+      <version>1.2.0.1.3.3.0</version>
+      <components>
         <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
+          <name>JOBTRACKER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/jobtracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/jobtracker.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
+          <name>TASKTRACKER</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/tasktracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>MAPREDUCE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality> 
+          <commandScript>
+            <script>scripts/client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+              
+        <component>
+          <name>HISTORYSERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>MAPREDUCE/JOBTRACKER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>mapred-site</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+        <config-type>mapreduce-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/client.py
new file mode 100644
index 0000000..79c644d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Client(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  Client().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/historyserver.py
new file mode 100644
index 0000000..972a767
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/historyserver.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Historyserver(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    service('historyserver',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('historyserver',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.historyserver_pid_file)
+
+if __name__ == "__main__":
+  Historyserver().execute()
\ No newline at end of file

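status() above delegates to check_process_status from resource_management. Assuming it behaves like the usual pid-file liveness probe (read the pid, signal 0 the process), a standalone equivalent would look roughly like this sketch:

  import os

  class ComponentIsNotRunning(Exception):
      pass

  def check_process_status_sketch(pid_file):
      # assumed behaviour, not the actual resource_management code
      try:
          with open(pid_file) as f:
              pid = int(f.read().strip())
          os.kill(pid, 0)  # signal 0 probes existence without disturbing the process
      except (IOError, ValueError, OSError):
          raise ComponentIsNotRunning()
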
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py
new file mode 100644
index 0000000..5cd41ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Jobtracker(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('jobtracker',
+            action='start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('jobtracker',
+            action='stop'
+    )
+    
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.jobtracker_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+
+    mapred_user = params.mapred_user
+    conf_dir = params.conf_dir
+    user_group = params.user_group
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=mapred_user,
+         group=user_group
+    )
+
+    ExecuteHadoop('mradmin -refreshNodes',
+                user=mapred_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
+    pass
+
+if __name__ == "__main__":
+  Jobtracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/mapreduce.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/mapreduce.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/mapreduce.py
new file mode 100644
index 0000000..c5fd002
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/mapreduce.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def mapreduce():
+  import params
+
+  Directory([params.mapred_pid_dir,params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory(params.mapred_local_dir,
+            owner=params.mapred_user,
+            mode=0755,
+            recursive=True
+  )
+
+  File(params.exclude_file_path,
+            owner=params.mapred_user,
+            group=params.user_group,
+  )
+
+  File(params.mapred_hosts_file_path,
+            owner=params.mapred_user,
+            group=params.user_group,
+  )
\ No newline at end of file

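For readers new to resource_management, the declarative Directory and File resources above boil down to roughly this imperative sequence, sketched under the assumption that recursive=True behaves like mkdir -p and that owner/group map onto chown (paths and names below are placeholders):

  import os, pwd, grp

  def ensure_dir(path, owner, group=None, mode=None):
      # roughly what Directory(..., recursive=True) declares
      if not os.path.isdir(path):
          os.makedirs(path)
      if mode is not None:
          os.chmod(path, mode)
      uid = pwd.getpwnam(owner).pw_uid
      gid = grp.getgrnam(group).gr_gid if group else -1
      os.chown(path, uid, gid)

  ensure_dir("/var/run/hadoop/mapred", owner="mapred", group="hadoop")
  ensure_dir("/hadoop/mapred", owner="mapred", mode=0755)
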
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
new file mode 100644
index 0000000..d722124
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hadoop/conf"
+
+mapred_user = status_params.mapred_user
+pid_dir_prefix = status_params.pid_dir_prefix
+mapred_pid_dir = status_params.mapred_pid_dir
+
+historyserver_pid_file = status_params.historyserver_pid_file
+jobtracker_pid_file = status_params.jobtracker_pid_file
+tasktracker_pid_file = status_params.tasktracker_pid_file
+
+hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
+hadoop_bin = "/usr/lib/hadoop/bin"
+user_group = config['configurations']['global']['user_group']
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+mapred_log_dir = format("{hdfs_log_dir_prefix}/{mapred_user}")
+mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
+
+hadoop_jar_location = "/usr/lib/hadoop/"
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#exclude file
+mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
+exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
+mapred_hosts_file_path = config['configurations']['mapred-site']['mapred.hosts']
\ No newline at end of file

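params.py only reads from the command JSON; a minimal configurations dict that would satisfy every lookup above looks like the following (the values are illustrative placeholders, not defaults shipped by Ambari):

  config = {
      "configurations": {
          "global": {
              "user_group": "hadoop",
              "hdfs_log_dir_prefix": "/var/log/hadoop",
              "mapred_user": "mapred",
              "hadoop_pid_dir_prefix": "/var/run/hadoop",
              "smokeuser": "ambari-qa",
              "security_enabled": False,
              "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
          },
          "mapred-site": {
              "mapred.local.dir": "/hadoop/mapred",
              "mapred.hosts.exclude": "/etc/hadoop/conf/mapred.exclude",
              "mapred.hosts": "/etc/hadoop/conf/mapred.include",
          },
      },
      "clusterHostInfo": {"decom_tt_hosts": []},
  }
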
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service.py
new file mode 100644
index 0000000..f4aa91b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(
+    name,
+    action='start'):
+
+  import params
+
+  pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-{name}.pid")
+  hadoop_daemon = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd} start {name}")
+    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    # skip the start when a live process already owns the pid file
+    Execute(daemon_cmd,
+            user=params.mapred_user,
+            not_if=no_op
+    )
+
+    # after a short wait, fail unless the pid file now points at a live process
+    Execute(no_op,
+            user=params.mapred_user,
+            not_if=no_op,
+            initial_wait=5
+    )
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {name}")
+    rm_pid =  format("rm -f {pid_file}")
+
+    Execute(daemon_cmd,
+            user=params.mapred_user
+    )
+    Execute(rm_pid)
\ No newline at end of file

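Substituting the defaults from params.py and status_params.py (conf_dir /etc/hadoop/conf, hadoop_bin /usr/lib/hadoop/bin, plus an assumed mapred user and /var/run/hadoop pid prefix), service('historyserver', action='start') expands to roughly these strings:

  pid_file = "/var/run/hadoop/mapred/hadoop-mapred-historyserver.pid"
  daemon_cmd = ("export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && "
                "/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf "
                "start historyserver")
  no_op = ("ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1"
           % (pid_file, pid_file))
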
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service_check.py
new file mode 100644
index 0000000..c0a4a59
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/service_check.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_location = params.hadoop_jar_location
+    input_file = 'mapredsmokeinput'
+    output_file = "mapredsmokeoutput"
+
+    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
+    create_file_cmd = format("{cleanup_cmd} ; hadoop dfs -put /etc/passwd {input_file}")
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_location}/hadoop-examples.jar wordcount {input_file} {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+    ExecuteHadoop(create_file_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.conf_dir
+    )
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.conf_dir,
+                  logoutput=True
+    )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  conf_dir=params.conf_dir
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
\ No newline at end of file

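ExecuteHadoop wraps each command with the hadoop client, so the smoke test above reduces to three shell invocations, sketched here under the assumption that the wrapper expands to `hadoop --config <conf_dir> <command>` (kinit is prepended only when security is enabled):

  commands = [
      # recreate the input file; the leading rmr may fail harmlessly on first run
      "hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; "
      "hadoop dfs -put /etc/passwd mapredsmokeinput",
      # run the wordcount example over it (note the double slash: jar_location ends in '/')
      "hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar "
      "wordcount mapredsmokeinput mapredsmokeoutput",
      # verify the output directory exists
      "hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput",
  ]
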
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
new file mode 100644
index 0000000..f964a76
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+mapred_user = config['configurations']['global']['mapred_user']
+pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}")
+
+jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid")
+tasktracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-tasktracker.pid")
+historyserver_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-historyserver.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py
new file mode 100644
index 0000000..77d974b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Tasktracker(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('tasktracker',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('tasktracker',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.tasktracker_pid_file)
+
+if __name__ == "__main__":
+  Tasktracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..02fc5fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in mr_exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

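This template is rendered with mr_exclude_hosts from params.py and written to the mapred.hosts.exclude path, which is what `mradmin -refreshNodes` in jobtracker.py's decommission() picks up. A quick way to inspect the rendering, assuming the jinja2 package (Ambari's own Template wrapper configures its environment differently):

  from jinja2 import Template

  src = "{% for host in mr_exclude_hosts %}\n{{host}}\n{% endfor %}"
  print(Template(src).render(mr_exclude_hosts=["tt1.example.com", "tt2.example.com"]))

With Jinja's default whitespace handling this emits each host on its own line, with blank lines around the block tags, which the Hadoop exclude-file reader tolerates.
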
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
index 42bee82..a4c500d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
@@ -16,19 +16,99 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>NAGIOS</name>
+      <comment>Nagios Monitoring and Alerting system</comment>
+      <version>3.5.0</version>
+      <components>
         <component>
             <name>NAGIOS_SERVER</name>
             <category>MASTER</category>
+            <cardinality>1</cardinality>
+            <commandScript>
+              <script>scripts/nagios_server.py</script>
+              <scriptType>PYTHON</scriptType>
+              <timeout>600</timeout>
+            </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>perl</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>perl-Net-SNMP</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-plugins-1.4.9</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-www-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>fping</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hdp_mon_nagios_addons</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php5-json</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php-pecl-json.x86_64</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>redhat5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php-pecl-json.x86_64</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>oraclelinux5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>php-pecl-json.x86_64</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
 </metainfo>

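A sketch of how a consumer could resolve the package list for a given osType from the metainfo above, using only the standard library (the merge-with-"any" rule is an assumption; Ambari's real resolver is more involved):

  import xml.etree.ElementTree as ET

  def packages_for(metainfo_xml, os_type):
      root = ET.fromstring(metainfo_xml)
      names = []
      for os_specific in root.iter("osSpecific"):
          if os_specific.findtext("osType") in (os_type, "any"):
              for package in os_specific.iter("package"):
                  names.append(package.findtext("name"))
      return names

  with open("metainfo.xml") as f:
      print(packages_for(f.read(), "centos5"))
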
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php
new file mode 100644
index 0000000..f4063fb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_aggregate.php
@@ -0,0 +1,243 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+    echo "Service description not provided -n option\n";
+    exit(3);
+  }
+  if ($type == "service") {
+    $service_name=$options['n'];
+    /* echo "DESC: " . $service_name . "\n"; */
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts = array();
+  if ($type == "service") {
+    $counts=query_alert_count($status_file_content, $service_name, $status_code);
+  } else {
+    $counts=query_host_count($status_file_content, $status_code);
+  }
+
+  if ($counts['total'] == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($counts['actual']/$counts['total'])*100;
+  }
+  if ($percent >= $crit) {
+    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (1);
+  }
+  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+  exit(0);
+
+
+  # Functions
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content, $status_code) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $total_hosts = 0;
+    $hosts = 0;
+    foreach ($matches[0] as $object) {
+      $total_hosts++;
+      if (getParameter($object, "current_state") == $status_code) {
+        $hosts++;
+      }
+    }
+    $hostcounts_object['total'] = $total_hosts;
+    $hostcounts_object['actual'] = $hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Alert counts */
+  function query_alert_count ($status_file_content, $service_name, $status_code) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $alertcounts_objects = array ();
+    $total_alerts=0;
+    $alerts=0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == $service_name) {
+        $total_alerts++;
+        if (getParameter($object, "current_state") >= $status_code) {
+          $alerts++;
+        }
+      }
+    }
+    $alertcounts_objects['total'] = $total_alerts;
+    $alertcounts_objects['actual'] = $alerts;
+    return $alertcounts_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break;
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break;
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break;
+      case "SYSTEM":
+      case "HDFS":
+      case "MAPREDUCE":
+      case "HBASE":
+        break;
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+
+/* JSON document format */
+/*
+{
+  "programstatus":{
+    "last_command_check":"1327385743"
+  },
+  "hostcounts":{
+    "up_nodes":"",
+    "down_nodes":""
+  },
+  "hoststatus":[
+    {
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_state":"0",
+      "last_hard_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_check":"1327385564",
+      "current_attempt":"1",
+      "last_hard_state_change":"1327362079",
+      "last_time_up":"1327385574",
+      "last_time_down":"0",
+      "last_time_unreachable":"0",
+      "is_flapping":"0",
+      "last_check":"1327385574",
+      "servicestatus":[
+      ]
+    }
+  ],
+  "servicestatus":[
+    {
+      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
+      "service_description":"HDFS Current Load",
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_attempt":"1",
+      "current_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_hard_state_change":"1327362079",
+      "last_time_ok":"1327385479",
+      "last_time_warning":"0",
+      "last_time_unknown":"0",
+      "last_time_critical":"0",
+      "last_check":"1327385574",
+      "is_flapping":"0"
+    }
+  ]
+}
+*/
+
+?>

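The heart of this plugin is the regex scan over Nagios' status.dat; here is the same counting logic in Python, handy for checking the pattern against a real status file (the field pattern mirrors getParameter above):

  import re

  def host_counts(status_text, status_code):
      # count hoststatus { ... } blocks whose current_state equals status_code
      blocks = re.findall(r"hoststatus \{([\S\s]*?)\}", status_text)
      matching = 0
      for block in blocks:
          m = re.search(r"\scurrent_state[\s= ]*([\S, ]*)\n", block)
          if m and m.group(1) == status_code:
              matching += 1
      return {"total": len(blocks), "actual": matching}
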
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl
new file mode 100644
index 0000000..a5680f7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_cpu.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl -w 
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+use strict;
+use Net::SNMP;
+use Getopt::Long;
+
+# Variable
+my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
+my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
+my $o_host = 	undef;
+my $o_community = undef;
+my $o_warn=	undef;
+my $o_crit=	undef;
+my $o_timeout = 15;
+my $o_port = 161;
+
+sub Usage {
+    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
+}
+
+Getopt::Long::Configure ("bundling");
+GetOptions(
+  'H:s'   => \$o_host,	
+  'C:s'   => \$o_community,	
+  'c:s'   => \$o_crit,        
+  'w:s'   => \$o_warn
+          );
+if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
+  Usage();
+  exit 3;
+}
+$o_warn =~ s/\%//g; 
+$o_crit =~ s/\%//g;
+alarm ($o_timeout);
+$SIG{'ALRM'} = sub {
+ print "Unable to contact host: $o_host\n";
+ exit 3;
+};
+
+# Connect to host
+my ($session,$error);
+($session, $error) = Net::SNMP->session(
+		-hostname  => $o_host,
+		-community => $o_community,
+		-port      => $o_port,
+		-timeout   => $o_timeout
+	  );
+if (!defined($session)) {
+   printf("Error opening session: %s.\n", $error);
+   exit 3;
+}
+
+my $exit_val=undef;
+my $resultat =  (Net::SNMP->VERSION < 4) ?
+	  $session->get_table($base_proc)
+	: $session->get_table(Baseoid => $base_proc);
+
+if (!defined($resultat)) {
+   printf("ERROR: Description table : %s.\n", $session->error);
+   $session->close;
+   exit 3;
+}
+
+$session->close;
+
+my ($cpu_used,$ncpu)=(0,0);
+foreach my $key ( keys %$resultat) {
+  if ($key =~ /$proc_load/) {
+    $cpu_used += $$resultat{$key};
+    $ncpu++;
+  }
+}
+
+if ($ncpu==0) {
+  print "Can't find CPU usage information : UNKNOWN\n";
+  exit 3;
+}
+
+$cpu_used /= $ncpu;
+
+print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
+printf(" %.1f%%",$cpu_used);
+$exit_val=0;
+
+if ($cpu_used > $o_crit) {
+ print " > $o_crit% : CRITICAL\n";
+ $exit_val=2;
+} else {
+  if ($cpu_used > $o_warn) {
+   print " > $o_warn% : WARNING\n";
+   $exit_val=1;
+  }
+}
+print " < $o_warn% : OK\n" if ($exit_val eq 0);
+exit $exit_val;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php
new file mode 100644
index 0000000..dee22b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_datanode_storage.php
@@ -0,0 +1,100 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the datanode host, gets the jmx-json document,
+ * and checks the storage capacity remaining on local datanode storage
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  }
+  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
+  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
+  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
+
+  $out_msg = "Capacity:[" . $cap_total . 
+             "], Remaining Capacity:[" . $cap_remain . 
+             "], percent_full:[" . $percent_full  . "]";
+  
+  if ($percent_full > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent_full > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

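With security and SSL off, this check reduces to one JMX GET plus a percentage; a stdlib-only sketch of that happy path (URL shape taken from the plugin above; no SPNEGO/kinit handling):

  import json, urllib2

  def datanode_storage_pct(host, port):
      url = ("http://%s:%s/jmx?qry=Hadoop:service=DataNode,"
             "name=FSDatasetState-*" % (host, port))
      bean = json.load(urllib2.urlopen(url))["beans"][0]
      remaining, total = bean["Remaining"], bean["Capacity"]
      return (total - remaining) * 100.0 / total

  print("percent_full: %.1f" % datanode_storage_pct("dn1.example.com", 50075))
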
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php
new file mode 100644
index 0000000..19347b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_blocks.php
@@ -0,0 +1,115 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the namenode, gets the jmx-json document,
+ * and checks whether the corrupt or missing blocks % is > threshold
+ * check_jmx -H hostaddress -p port -w 1% -c 1%
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $nn_jmx_property=$options['s'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['u'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $m_percent = 0;
+    $c_percent = 0;
+    $object = $json_array['beans'][0];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }
+    $missing_blocks = $object['MissingBlocks'];
+    $corrupt_blocks = $object['CorruptBlocks'];
+    $total_blocks = $object['BlocksTotal'];
+    if($total_blocks == 0) {
+      $m_percent = 0;
+      $c_percent = 0;
+    } else {
+      $m_percent = ($missing_blocks/$total_blocks)*100;
+      $c_percent = ($corrupt_blocks/$total_blocks)*100;
+      break;
+    }
+  }
+  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
+             ">, missing_blocks:<" . $missing_blocks .
+             ">, total_blocks:<" . $total_blocks . ">";
+
+  if ($m_percent > $crit || $c_percent > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($m_percent > $warn || $c_percent > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php
new file mode 100644
index 0000000..af72723
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hdfs_capacity.php
@@ -0,0 +1,109 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the namenode, gets the jmx-json document,
+ * and checks whether the % HDFS capacity used >= warn and critical limits.
+ * check_jmx -H hostaddress -p port -w 1 -c 1
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $percent = 0;
+    $object = $json_array['beans'][0];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }
+    $CapacityUsed = $object['CapacityUsed'];
+    $CapacityRemaining = $object['CapacityRemaining'];
+    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
+    if($CapacityTotal == 0) {
+      $percent = 0;
+    } else {
+      $percent = ($CapacityUsed/$CapacityTotal)*100;
+      break;
+    }
+  }
+  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
+             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
+
+  if ($percent >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh
new file mode 100644
index 0000000..640c077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hive_metastore_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#The uri is of the form thrift://<hostname>:<port>
+HOST=$1
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
+export JAVA_HOME=$JAVA_HOME
+out=`hcat $HCAT_URL -e "show databases" 2>&1`
+if [[ "$?" -ne 0 ]]; then
+  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
+  exit 2;
+fi
+echo "OK: Hive Metastore status OK";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh
new file mode 100644
index 0000000..076d9b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_hue_status.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+status=`/etc/init.d/hue status 2>&1`
+
+if [[ "$?" -ne 0 ]]; then
+	echo "WARNING: Hue is stopped";
+	exit 1;
+fi
+
+echo "OK: Hue is running";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
new file mode 100644
index 0000000..15c85eb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+MAPRED_LOCAL_DIRS=$1
+CRITICAL=`echo $2 | cut -d % -f 1`
+IFS=","
+for mapred_dir in $MAPRED_LOCAL_DIRS
+do
+  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
+  if [ $percent -ge $CRITICAL ]; then
+    echo "CRITICAL: MapReduce local dir is full."
+    exit 2
+  fi
+done
+echo "OK: MapReduce local dir space is available."
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php
new file mode 100644
index 0000000..186166d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_name_dir_status.php
@@ -0,0 +1,93 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes call to namenode, get the jmx-json document
+ * check the NameDirStatuses to find any offline (failed) directories
+ * check_name_dir_status.php -h hostaddress -p port -k keytab_path -r principal_name -t kinit_path -s security_enabled -e ssl_enabled
+ */
+ 
+  include "hdp_nagios_init.php";
+
+  $options = getopt("h:p:e:k:r:t:s:");
+  //Check only for mandatory options
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+  
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if ($object['NameDirStatuses'] == "") {
+    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
+    exit(1);
+  }
+  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
+  $failed_dir_count = count($NameDirStatuses['failed']);
+  $out_msg = "CRITICAL: Offline NameNode directories: ";
+  if ($failed_dir_count > 0) {
+    foreach ($NameDirStatuses['failed'] as $key => $value) {
+      $out_msg = $out_msg . $key . ":" . $value . ", ";
+    }
+    echo $out_msg . "\n";
+    exit (2);
+  }
+  echo "OK: All NameNode directories are active" . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
+  }
+?>
\ No newline at end of file
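
A sketch of the JMX probe this plugin issues and the answer shape it relies on: the
NameNodeInfo bean carries NameDirStatuses as a JSON string with "active" and
"failed" maps, and any entry under "failed" drives the CRITICAL exit. The response
below is abbreviated and illustrative; host, port and directory come from
dfs.http.address and dfs.name.dir in the test configs in this commit:

  $ curl -s 'http://c6401.ambari.apache.org:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo'
  { "beans" : [ {
      "NameDirStatuses" : "{\"active\":{\"/hadoop/hdfs/namenode\":\"IMAGE_AND_EDITS\"},\"failed\":{}}"
  } ] }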

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh
new file mode 100644
index 0000000..50b075a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_namenodes_ha.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+IFS=',' read -a namenodes <<< "$1"
+port=$2
+totalNN=${#namenodes[@]}
+activeNN=()
+standbyNN=()
+unavailableNN=()
+
+for nn in "${namenodes[@]}"
+do
+  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
+  if [ "$status" == "active" ]; then
+    activeNN[${#activeNN[*]}]="$nn"
+  elif [ "$status" == "standby" ]; then
+    standbyNN[${#standbyNN[*]}]="$nn"
+  elif [ "$status" == "" ]; then
+    unavailableNN[${#unavailableNN[*]}]="$nn"
+  fi
+done
+
+message=""
+critical=false
+
+if [ ${#activeNN[@]} -gt 1 ]; then
+  critical=true
+  message=$message" Only one NN can have HAState=active;"
+elif [ ${#activeNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Active NN available;"
+elif [ ${#standbyNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Standby NN available;"
+fi
+
+NNstats=" Active<"
+for nn in "${activeNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Standby<"
+for nn in "${standbyNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Unavailable<"
+for nn in "${unavailableNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">"
+
+if [ $critical == false ]; then
+  echo "OK: NameNode HA healthy;"$NNstats
+  exit 0
+fi
+
+echo "CRITICAL:"$message$NNstats
+exit 2

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh
new file mode 100644
index 0000000..020b41d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_nodemanager_health.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HOST=$1
+PORT=$2
+NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
+SEC_ENABLED=$3
+export PATH="/usr/bin:$PATH"
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$4
+  NAGIOS_USER=$5
+  KINIT_PATH=$6
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+
+RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
+if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
+  echo "OK: NodeManager healthy";
+  exit 0;
+fi
+echo "CRITICAL: NodeManager unhealthy";
+exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh
new file mode 100644
index 0000000..820ee99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_oozie_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# OOZIE_URL is of the form http://<hostname>:<port>/oozie
+HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+OOZIE_URL="http://$HOST:$PORT/oozie"
+export JAVA_HOME=$JAVA_HOME
+out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing Oozie Server status [$out]";
+  exit 2;
+fi
+echo "OK: Oozie Server status [$out]";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php
new file mode 100644
index 0000000..463f69b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_rpcq_latency.php
@@ -0,0 +1,104 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes call to master node, get the jmx-json document
+ * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
+ * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
+ * Warning and Critical values are in seconds
+ * Service Name = JobTracker, NameNode, JobHistoryServer
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $master=$options['n'];
+  $warn=$options['w'];
+  $crit=$options['c'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  } 
+  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
+  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
+
+  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
+             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
+             "> Secs";
+
+  if ($RpcQueueTime_avg_time >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($RpcQueueTime_avg_time >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh
new file mode 100644
index 0000000..7fbc4c4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_templeton_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# out='{"status":"ok","version":"v1"}<status_code:200>'
+HOST=$1
+PORT=$2
+VERSION=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then 
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+regex="^.*\"status\":\"ok\".*<status_code:200>$"
+out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
+if [[ $out =~ $regex ]]; then
+  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
+  echo "OK: WebHCat Server status [$out]";
+  exit 0;
+fi
+echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
+exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh
new file mode 100644
index 0000000..b23045e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/check_webui.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+checkurl () {
+  url=$1
+  curl $url -o /dev/null
+  echo $?
+}
+
+service=$1
+host=$2
+port=$3
+
+if [[ -z "$service" || -z "$host" ]]; then
+  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
+  exit 3;
+fi
+
+case "$service" in
+
+jobtracker) 
+    jtweburl="http://$host:$port"
+    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
+      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
+      exit 1;
+    fi
+    ;;
+namenode)
+    nnweburl="http://$host:$port"
+    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
+      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
+      exit 1;
+    fi
+    ;;
+jobhistory)
+    jhweburl="http://$host:$port/jobhistoryhome.jsp"
+    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
+      exit 1;
+    fi
+    ;;
+hbase)
+    hbaseweburl="http://$host:$port/master-status"
+    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
+      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
+      exit 1;
+    fi
+    ;;
+resourcemanager)
+    rmweburl="http://$host:$port/cluster"
+    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
+      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
+      exit 1;
+    fi
+    ;;
+historyserver2)
+    hsweburl="http://$host:$port/jobhistory"
+    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
+      exit 1;
+    fi
+    ;;
+*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
+   exit 3
+   ;;
+esac
+
+echo "OK: Successfully accessed $service Web UI"
+exit 0;


[05/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
new file mode 100644
index 0000000..6d12470
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
@@ -0,0 +1,444 @@
+{
+    "roleCommand": "INSTALL", 
+    "clusterName": "cl1", 
+    "hostname": "c6402.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-1.3.4\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\"}]", 
+        "package_list": "[{\"type\":\"rpm\",\"name\":\"lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop\"},{\"type\":\"rpm\",\"name\":\"hadoop-libhdfs\"},{\"type\":\"rpm\",\"name\":\"hadoop-native\"},{\"type\":\"rpm\",\"name\":\"hadoop-pipes\"},{\"type\":\"rpm\",\"name\":\"hadoop-sbin\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo-native\"},{\"type\":\"rpm\",\"name\":\"snappy\"},{\"type\":\"rpm\",\"name\":\"snappy-devel\"},{\"type\":\"rpm\",\"name\":\"ambari-log4j\"}]", 
+        "stack_version": "1.3.4", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u45-linux-x64.tar.gz", 
+        "ambari_db_rca_username": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "HDFS", 
+    "role": "DATANODE", 
+    "commandParams": {
+        "command_timeout": "600", 
+        "service_package_folder": "HDFS",
+        "script_type": "PYTHON", 
+        "schema_version": "2.0", 
+        "script": "scripts/datanode.py",
+        "excluded_hosts": "host1,host2"
+    }, 
+    "taskId": 18, 
+    "public_hostname": "c6402.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "ambari.mapred.child.java.opts.memory": "768", 
+            "mapred.job.reduce.input.buffer.percent": "0.0", 
+            "mapred.job.map.memory.mb": "1536", 
+            "mapred.output.compression.type": "BLOCK", 
+            "mapred.jobtracker.maxtasks.per.job": "-1", 
+            "mapred.hosts": "/etc/hadoop/conf/mapred.include", 
+            "mapred.map.output.compression.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
+            "mapred.child.root.logger": "INFO,TLA", 
+            "mapred.tasktracker.tasks.sleeptime-before-sigkill": "250", 
+            "io.sort.spill.percent": "0.9", 
+            "mapred.reduce.parallel.copies": "30", 
+            "mapred.userlog.retain.hours": "24", 
+            "mapred.reduce.tasks.speculative.execution": "false", 
+            "io.sort.mb": "200", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapred.jobtracker.blacklist.fault-timeout-window": "180", 
+            "mapred.job.tracker.history.completed.location": "/mapred/history/done", 
+            "mapred.job.shuffle.input.buffer.percent": "0.7", 
+            "io.sort.record.percent": ".2", 
+            "mapred.cluster.max.reduce.memory.mb": "4096", 
+            "mapred.job.reuse.jvm.num.tasks": "1", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapred.job.tracker.http.address": "c6402.ambari.apache.org:50030", 
+            "mapred.job.tracker.persist.jobstatus.hours": "1", 
+            "mapred.healthChecker.script.path": "/etc/hadoop/conf/health_check", 
+            "mapreduce.jobtracker.staging.root.dir": "/user", 
+            "mapred.job.shuffle.merge.percent": "0.66", 
+            "mapred.cluster.reduce.memory.mb": "2048", 
+            "mapred.job.tracker.persist.jobstatus.dir": "/mapred/jobstatus", 
+            "mapreduce.tasktracker.group": "hadoop", 
+            "mapred.tasktracker.map.tasks.maximum": "4", 
+            "mapred.child.java.opts": "-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true", 
+            "mapred.jobtracker.retirejob.check": "10000", 
+            "mapred.job.tracker": "c6402.ambari.apache.org:50300", 
+            "mapreduce.history.server.embedded": "false", 
+            "io.sort.factor": "100", 
+            "hadoop.job.history.user.location": "none", 
+            "mapreduce.reduce.input.limit": "10737418240", 
+            "mapred.reduce.slowstart.completed.maps": "0.05", 
+            "mapred.cluster.max.map.memory.mb": "6144", 
+            "mapreduce.history.server.http.address": "c6402.ambari.apache.org:51111", 
+            "mapred.jobtracker.taskScheduler": "org.apache.hadoop.mapred.CapacityTaskScheduler", 
+            "mapred.max.tracker.blacklists": "16", 
+            "mapred.local.dir": "/hadoop/mapred", 
+            "mapred.healthChecker.interval": "135000", 
+            "mapred.jobtracker.restart.recover": "false", 
+            "mapred.jobtracker.blacklist.fault-bucket-width": "15", 
+            "mapred.jobtracker.retirejob.interval": "21600000", 
+            "tasktracker.http.threads": "50", 
+            "mapred.job.tracker.persist.jobstatus.active": "false", 
+            "mapred.system.dir": "/mapred/system", 
+            "mapred.tasktracker.reduce.tasks.maximum": "2", 
+            "mapred.cluster.map.memory.mb": "1536", 
+            "mapred.hosts.exclude": "/etc/hadoop/conf/mapred.exclude", 
+            "mapred.queue.names": "default", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.fileoutputcommitter.marksuccessfuljobs": "false", 
+            "mapred.job.reduce.memory.mb": "2048", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapred.healthChecker.script.timeout": "60000", 
+            "jetty.connector": "org.mortbay.jetty.nio.SelectChannelConnector", 
+            "mapreduce.jobtracker.split.metainfo.maxsize": "50000000", 
+            "mapred.job.tracker.handler.count": "50", 
+            "mapred.inmem.merge.threshold": "1000", 
+            "mapred.task.tracker.task-controller": "org.apache.hadoop.mapred.DefaultTaskController", 
+            "mapred.jobtracker.completeuserjobs.maximum": "0", 
+            "mapred.task.timeout": "600000", 
+            "mapred.map.tasks.speculative.execution": "false"
+        }, 
+        "oozie-site": {
+            "oozie.service.PurgeService.purge.interval": "3600", 
+            "oozie.service.CallableQueueService.queue.size": "1000", 
+            "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd", 
+            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", 
+            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ", 
+            "use.system.libpath.for.mapreduce.and.pig.jobs": "false", 
+            "oozie.service.JPAService.create.db.schema": "false", 
+            "oozie.authentication.kerberos.name.rules": "DEFAULT", 
+            "oozie.service.ActionService.executor.ext.classes": "org.apache.oozie.action.email.EmailActionExecutor,\norg.apache.oozie.action.hadoop.HiveActionExecutor,\norg.apache.oozie.action.hadoop.ShellActionExecutor,\norg.apache.oozie.action.hadoop.SqoopActionExecutor,\norg.apache.oozie.action.hadoop.DistcpActionExecutor", 
+            "oozie.service.AuthorizationService.authorization.enabled": "true", 
+            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie", 
+            "oozie.service.JPAService.jdbc.password": "q", 
+            "oozie.service.coord.normal.default.timeout": "120", 
+            "oozie.service.JPAService.pool.max.active.conn": "10", 
+            "oozie.service.PurgeService.older.than": "30", 
+            "oozie.db.schema.name": "oozie", 
+            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", 
+            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ", 
+            "oozie.service.CallableQueueService.callable.concurrency": "3", 
+            "oozie.service.JPAService.jdbc.username": "oozie", 
+            "oozie.service.CallableQueueService.threads": "10", 
+            "oozie.systemmode": "NORMAL", 
+            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib", 
+            "oozie.authentication.type": "simple", 
+            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", 
+            "oozie.system.id": "oozie-${user.name}"
+        }, 
+        "webhcat-site": {
+            "templeton.pig.path": "pig.tar.gz/pig/bin/pig", 
+            "templeton.exec.timeout": "60000", 
+            "templeton.override.enabled": "false", 
+            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", 
+            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181", 
+            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse", 
+            "templeton.storage.class": "org.apache.hcatalog.templeton.tool.ZooKeeperStorage", 
+            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz", 
+            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar", 
+            "templeton.port": "50111", 
+            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar", 
+            "templeton.hadoop": "/usr/bin/hadoop", 
+            "templeton.hive.path": "hive.tar.gz/hive/bin/hive", 
+            "templeton.hadoop.conf.dir": "/etc/hadoop/conf", 
+            "templeton.hcat": "/usr/bin/hcat", 
+            "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz"
+        }, 
+        "global": {
+            "security_enabled": "false", 
+            "hbase_pid_dir": "/var/run/hbase", 
+            "proxyuser_group": "users", 
+            "zk_user": "zookeeper", 
+            "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/", 
+            "rrdcached_base_dir": "/var/lib/ganglia/rrds", 
+            "syncLimit": "5", 
+            "oozie_pid_dir": "/var/run/oozie", 
+            "hbase_regionserver_heapsize": "1024m", 
+            "dtnode_heapsize": "1024m", 
+            "jtnode_heapsize": "1024m", 
+            "hcat_log_dir": "/var/log/webhcat", 
+            "oozie_hostname": "c6402.ambari.apache.org", 
+            "hive_aux_jars_path": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
+            "tickTime": "2000", 
+            "hive_ambari_database": "MySQL", 
+            "rca_enabled": "true", 
+            "namenode_heapsize": "1024m", 
+            "oozie_log_dir": "/var/log/oozie", 
+            "hive_jdbc_driver": "com.mysql.jdbc.Driver", 
+            "oozie_user": "oozie", 
+            "oozie_data_dir": "/hadoop/oozie/data", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "lzo_enabled": "true", 
+            "namenode_opt_maxnewsize": "200m", 
+            "smokeuser": "ambari-qa", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "hive_hostname": "c6402.ambari.apache.org", 
+            "hive_metastore_port": "9083", 
+            "hbase_master_heapsize": "1024m", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "hcat_pid_dir": "/etc/run/webhcat", 
+            "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
+            "initLimit": "10", 
+            "hive_database_type": "mysql", 
+            "oozie_database": "New Derby Database", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "user_group": "hadoop", 
+            "hive_user": "hive", 
+            "gmond_user": "nobody", 
+            "nagios_web_login": "nagiosadmin", 
+            "nagios_contact": "q@q.q", 
+            "hive_database": "New MySQL Database", 
+            "nagios_web_password": "q", 
+            "clientPort": "2181", 
+            "oozie_derby_database": "Derby", 
+            "snappy_enabled": "true", 
+            "ganglia_conf_dir": "/etc/ganglia/hdp", 
+            "hdfs_user": "hdfs", 
+            "hbase_user": "hbase", 
+            "oozie_database_type": "derby", 
+            "webhcat_user": "hcat", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "jtnode_opt_maxnewsize": "200m", 
+            "mysql_connector_url": "${download_url}/mysql-connector-java-5.1.18.zip", 
+            "gmetad_user": "nobody", 
+            "hive_log_dir": "/var/log/hive", 
+            "jtnode_opt_newsize": "200m", 
+            "namenode_opt_newsize": "200m", 
+            "mapred_user": "mapred", 
+            "nagios_group": "nagios", 
+            "hive_pid_dir": "/var/run/hive", 
+            "hcat_user": "hcat", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "nagios_user": "nagios", 
+            "hbase_log_dir": "/var/log/hbase"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.access.time.precision": "0", 
+            "ipc.server.max.response.size": "5242880", 
+            "dfs.web.ugi": "gopher,gopher", 
+            "dfs.support.append": "true", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.replication": "3", 
+            "ambari.dfs.datanode.http.port": "50075", 
+            "dfs.block.size": "134217728", 
+            "dfs.data.dir": "/hadoop/hdfs/data", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}", 
+            "dfs.datanode.socket.write.timeout": "0", 
+            "ipc.server.read.threadpool.size": "5", 
+            "dfs.balance.bandwidthPerSec": "6250000", 
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.permissions.supergroup": "hdfs", 
+            "dfs.https.address": "c6401.ambari.apache.org:50470", 
+            "ambari.dfs.datanode.port": "50010", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.hosts": "/etc/hadoop/conf/dfs.include", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.secondary.https.port": "50490", 
+            "dfs.permissions": "true", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.block.local-path-access.user": "hbase", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.secondary.http.address": "c6402.ambari.apache.org:50090", 
+            "dfs.http.address": "c6401.ambari.apache.org:50070", 
+            "dfs.https.port": "50070", 
+            "dfs.replication.max": "50", 
+            "dfs.datanode.max.xcievers": "4096", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.du.pct": "0.85f", 
+            "dfs.safemode.threshold.pct": "1.0f", 
+            "dfs.umaskmode": "077"
+        }, 
+        "hbase-site": {
+            "hbase.client.keyvalue.maxsize": "10485760", 
+            "hbase.hstore.compactionThreshold": "3", 
+            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
+            "hbase.regionserver.handler.count": "60", 
+            "dfs.client.read.shortcircuit": "true", 
+            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
+            "hbase.hregion.memstore.block.multiplier": "2", 
+            "hbase.hregion.memstore.flush.size": "134217728", 
+            "hbase.superuser": "hbase", 
+            "hbase.zookeeper.property.clientPort": "2181", 
+            "hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.WritableRpcEngine", 
+            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
+            "zookeeper.session.timeout": "60000", 
+            "hbase.tmp.dir": "/hadoop/hbase", 
+            "hbase.hregion.max.filesize": "10737418240", 
+            "hfile.block.cache.size": "0.40", 
+            "hbase.security.authentication": "simple", 
+            "hbase.zookeeper.quorum": "c6401.ambari.apache.org", 
+            "zookeeper.znode.parent": "/hbase-unsecure", 
+            "hbase.hstore.blockingStoreFiles": "10", 
+            "hbase.hregion.majorcompaction": "86400000", 
+            "hbase.security.authorization": "false", 
+            "hbase.cluster.distributed": "true", 
+            "hbase.hregion.memstore.mslab.enabled": "true", 
+            "hbase.client.scanner.caching": "100", 
+            "hbase.zookeeper.useMulti": "true"
+        }, 
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "hadoop.proxyuser.hcat.groups": "users", 
+            "fs.checkpoint.size": "67108864", 
+            "hadoop.proxyuser.oozie.groups": "users", 
+            "fs.default.name": "hdfs://c6401.ambari.apache.org:8020", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.proxyuser.hive.groups": "users", 
+            "webinterface.private.actions": "false", 
+            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec", 
+            "hadoop.security.authentication": "simple", 
+            "fs.checkpoint.edits.dir": "${fs.checkpoint.dir}", 
+            "fs.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.trash.interval": "360", 
+            "ipc.client.idlethreshold": "8000", 
+            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
+            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
+            "io.compression.codec.lzo.class": "com.hadoop.compression.lzo.LzoCodec", 
+            "fs.checkpoint.period": "21600", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "ipc.client.connect.max.retries": "50"
+        }, 
+        "hive-site": {
+            "hive.enforce.sorting": "true", 
+            "javax.jdo.option.ConnectionPassword": "q", 
+            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
+            "hive.optimize.bucketmapjoin.sortedmerge": "true", 
+            "fs.file.impl.disable.cache": "true", 
+            "hive.auto.convert.join.noconditionaltask": "true", 
+            "hive.map.aggr": "true", 
+            "hive.security.authorization.enabled": "false", 
+            "hive.optimize.reducededuplication.min.reducer": "1", 
+            "hive.optimize.bucketmapjoin": "true", 
+            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
+            "hive.mapjoin.bucket.cache.size": "10000", 
+            "hive.auto.convert.join.noconditionaltask.size": "1000000000", 
+            "javax.jdo.option.ConnectionUserName": "hive", 
+            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", 
+            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
+            "hive.metastore.client.socket.timeout": "60", 
+            "hive.semantic.analyzer.factory.impl": "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory", 
+            "hive.auto.convert.join": "true", 
+            "hive.enforce.bucketing": "true", 
+            "hive.mapred.reduce.tasks.speculative.execution": "false", 
+            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
+            "hive.auto.convert.sortmerge.join": "true", 
+            "fs.hdfs.impl.disable.cache": "true", 
+            "hive.security.authorization.manager": "org.apache.hcatalog.security.HdfsAuthorizationProvider", 
+            "ambari.hive.db.schema.name": "hive", 
+            "hive.metastore.execute.setugi": "true", 
+            "hive.auto.convert.sortmerge.join.noconditionaltask": "true", 
+            "hive.server2.enable.doAs": "true", 
+            "hive.optimize.mapjoin.mapreduce": "true"
+        }
+    }, 
+    "configurationTags": {
+        "mapred-site": {
+            "tag": "version1"
+        }, 
+        "oozie-site": {
+            "tag": "version1"
+        }, 
+        "webhcat-site": {
+            "tag": "version1"
+        }, 
+        "global": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1"
+        }, 
+        "hbase-site": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1"
+        }, 
+        "hive-site": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "1-1", 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "ganglia_monitor_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "nagios_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hive_metastore_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670"
+        ], 
+        "mapred_tt_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "hbase_rs_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ganglia_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hbase_master_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "hive_mysql_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "oozie_server": [
+            "c6402.ambari.apache.org"
+        ], 
+        "webhcat_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "jtnode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hive_server_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
new file mode 100644
index 0000000..9520b02
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
@@ -0,0 +1,549 @@
+{
+    "roleCommand": "START", 
+    "clusterName": "cl1", 
+    "hostname": "c6402.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-1.3.4\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\"}]", 
+        "package_list": "[{\"type\":\"rpm\",\"name\":\"hive\"},{\"type\":\"rpm\",\"name\":\"mysql-connector-java\"},{\"type\":\"rpm\",\"name\":\"mysql\"},{\"type\":\"rpm\",\"name\":\"mysql-server\"}]", 
+        "stack_version": "1.3.4", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u45-linux-x64.tar.gz", 
+        "ambari_db_rca_username": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "HIVE", 
+    "role": "MYSQL_SERVER", 
+    "commandParams": {
+        "command_timeout": "600", 
+        "service_package_folder": "HIVE",
+        "script_type": "PYTHON", 
+        "schema_version": "2.0", 
+        "script": "scripts/mysql_server.py",
+        "excluded_hosts": "host1"
+    }, 
+    "taskId": 117, 
+    "public_hostname": "c6402.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "ambari.mapred.child.java.opts.memory": "768", 
+            "mapred.job.reduce.input.buffer.percent": "0.0", 
+            "mapred.job.map.memory.mb": "1536", 
+            "mapred.output.compression.type": "BLOCK", 
+            "mapred.jobtracker.maxtasks.per.job": "-1", 
+            "mapreduce.jobtracker.keytab.file": "/etc/security/keytabs/jt.service.keytab", 
+            "mapred.map.output.compression.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
+            "mapred.child.root.logger": "INFO,TLA", 
+            "mapred.tasktracker.tasks.sleeptime-before-sigkill": "250", 
+            "io.sort.spill.percent": "0.9", 
+            "mapred.reduce.parallel.copies": "30", 
+            "mapred.userlog.retain.hours": "24", 
+            "mapred.reduce.tasks.speculative.execution": "false", 
+            "mapred.healthChecker.interval": "135000", 
+            "io.sort.mb": "200", 
+            "mapreduce.jobtracker.kerberos.principal": "jt/_HOST@EXAMPLE.COM", 
+            "mapred.jobtracker.blacklist.fault-timeout-window": "180", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapred.job.shuffle.input.buffer.percent": "0.7", 
+            "mapred.job.tracker.history.completed.location": "/mapred/history/done", 
+            "io.sort.record.percent": ".2", 
+            "mapred.cluster.max.reduce.memory.mb": "4096", 
+            "mapred.job.reuse.jvm.num.tasks": "1", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapred.job.tracker.http.address": "c6402.ambari.apache.org:50030", 
+            "mapred.job.tracker.persist.jobstatus.hours": "1", 
+            "mapred.healthChecker.script.path": "/etc/hadoop/conf/health_check", 
+            "mapreduce.jobtracker.staging.root.dir": "/user", 
+            "mapred.job.shuffle.merge.percent": "0.66", 
+            "mapred.cluster.reduce.memory.mb": "2048", 
+            "mapred.job.tracker.persist.jobstatus.dir": "/mapred/jobstatus", 
+            "mapreduce.tasktracker.group": "hadoop", 
+            "mapred.tasktracker.map.tasks.maximum": "4", 
+            "mapred.child.java.opts": "-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true", 
+            "mapreduce.jobhistory.keytab.file": "/etc/security/keytabs/jt.service.keytab", 
+            "mapred.jobtracker.retirejob.check": "10000", 
+            "mapred.job.tracker": "c6402.ambari.apache.org:50300", 
+            "mapreduce.history.server.embedded": "false", 
+            "io.sort.factor": "100", 
+            "hadoop.job.history.user.location": "none", 
+            "mapreduce.reduce.input.limit": "10737418240", 
+            "mapred.reduce.slowstart.completed.maps": "0.05", 
+            "mapred.cluster.max.map.memory.mb": "6144", 
+            "mapreduce.tasktracker.keytab.file": "/etc/security/keytabs/tt.service.keytab", 
+            "mapred.jobtracker.taskScheduler": "org.apache.hadoop.mapred.CapacityTaskScheduler", 
+            "mapred.max.tracker.blacklists": "16", 
+            "mapreduce.tasktracker.kerberos.principal": "tt/_HOST@EXAMPLE.COM", 
+            "mapred.local.dir": "/hadoop/mapred", 
+            "mapreduce.history.server.http.address": "c6402.ambari.apache.org:51111", 
+            "mapred.jobtracker.restart.recover": "false", 
+            "mapred.jobtracker.blacklist.fault-bucket-width": "15", 
+            "mapred.jobtracker.retirejob.interval": "21600000", 
+            "tasktracker.http.threads": "50", 
+            "mapred.job.tracker.persist.jobstatus.active": "false", 
+            "mapred.system.dir": "/mapred/system", 
+            "mapred.tasktracker.reduce.tasks.maximum": "2", 
+            "mapred.cluster.map.memory.mb": "1536", 
+            "mapred.hosts.exclude": "/etc/hadoop/conf/mapred.exclude", 
+            "mapred.queue.names": "default", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.fileoutputcommitter.marksuccessfuljobs": "false", 
+            "mapred.job.reduce.memory.mb": "2048", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapred.healthChecker.script.timeout": "60000", 
+            "jetty.connector": "org.mortbay.jetty.nio.SelectChannelConnector", 
+            "mapreduce.jobtracker.split.metainfo.maxsize": "50000000", 
+            "mapred.job.tracker.handler.count": "50", 
+            "mapred.inmem.merge.threshold": "1000", 
+            "mapred.hosts": "/etc/hadoop/conf/mapred.include", 
+            "mapred.task.tracker.task-controller": "org.apache.hadoop.mapred.LinuxTaskController", 
+            "mapred.jobtracker.completeuserjobs.maximum": "0", 
+            "mapred.task.timeout": "600000", 
+            "mapreduce.jobhistory.kerberos.principal": "jt/_HOST@EXAMPLE.COM", 
+            "mapred.map.tasks.speculative.execution": "false"
+        }, 
+        "oozie-site": {
+            "oozie.service.PurgeService.purge.interval": "3600", 
+            "oozie.service.CallableQueueService.queue.size": "1000", 
+            "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd", 
+            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", 
+            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ", 
+            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", 
+            "local.realm": "EXAMPLE.COM", 
+            "use.system.libpath.for.mapreduce.and.pig.jobs": "false", 
+            "oozie.service.HadoopAccessorService.kerberos.enabled": "true", 
+            "oozie.service.JPAService.create.db.schema": "false", 
+            "oozie.authentication.kerberos.name.rules": "RULE:[2:$1@$0](jt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](tt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nDEFAULT", 
+            "oozie.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "oozie.service.ActionService.executor.ext.classes": "org.apache.oozie.action.email.EmailActionExecutor,\norg.apache.oozie.action.hadoop.HiveActionExecutor,\norg.apache.oozie.action.hadoop.ShellActionExecutor,\norg.apache.oozie.action.hadoop.SqoopActionExecutor,\norg.apache.oozie.action.hadoop.DistcpActionExecutor", 
+            "oozie.service.HadoopAccessorService.kerberos.principal": "oozie/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "oozie.service.AuthorizationService.authorization.enabled": "true", 
+            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie", 
+            "oozie.service.JPAService.jdbc.password": "q", 
+            "oozie.service.coord.normal.default.timeout": "120", 
+            "oozie.service.JPAService.pool.max.active.conn": "10", 
+            "oozie.service.PurgeService.older.than": "30", 
+            "oozie.db.schema.name": "oozie", 
+            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", 
+            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ", 
+            "oozie.service.CallableQueueService.callable.concurrency": "3", 
+            "oozie.service.JPAService.jdbc.username": "oozie", 
+            "oozie.service.CallableQueueService.threads": "10", 
+            "oozie.systemmode": "NORMAL", 
+            "oozie.service.HadoopAccessorService.keytab.file": "/etc/security/keytabs/oozie.service.keytab", 
+            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib", 
+            "oozie.authentication.type": "kerberos", 
+            "oozie.authentication.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "oozie.system.id": "oozie-${user.name}"
+        }, 
+        "webhcat-site": {
+            "templeton.pig.path": "pig.tar.gz/pig/bin/pig", 
+            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@EXAMPLE.COM", 
+            "templeton.override.enabled": "false", 
+            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", 
+            "templeton.kerberos.secret": "secret", 
+            "templeton.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181", 
+            "templeton.exec.timeout": "60000", 
+            "templeton.storage.class": "org.apache.hcatalog.templeton.tool.ZooKeeperStorage", 
+            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz", 
+            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar", 
+            "templeton.port": "50111", 
+            "templeton.hadoop.conf.dir": "/etc/hadoop/conf", 
+            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar", 
+            "templeton.hadoop": "/usr/bin/hadoop", 
+            "templeton.hive.path": "hive.tar.gz/hive/bin/hive", 
+            "templeton.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "templeton.hcat": "/usr/bin/hcat", 
+            "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz"
+        }, 
+        "global": {
+            "tasktracker_task_controller": "org.apache.hadoop.mapred.LinuxTaskController", 
+            "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab", 
+            "hadoop_http_principal_name": "HTTP/_HOST", 
+            "kinit_path_local": "/usr/bin", 
+            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
+            "hbase_regionserver_heapsize": "1024m", 
+            "datanode_primary_name": "dn", 
+            "namenode_principal_name": "nn/_HOST", 
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
+            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM", 
+            "dfs_datanode_http_address": "1022", 
+            "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
+            "jobtracker_primary_name": "jt", 
+            "hbase_pid_dir": "/var/run/hbase", 
+            "namenode_opt_maxnewsize": "200m", 
+            "syncLimit": "5", 
+            "clientPort": "2181", 
+            "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
+            "hive_metastore_primary_name": "hive", 
+            "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab", 
+            "nagios_primary_name": "nagios", 
+            "jobtracker_principal_name": "jt/_HOST", 
+            "hive_database": "New MySQL Database", 
+            "hcat_pid_dir": "/etc/run/webhcat", 
+            "oozie_derby_database": "Derby", 
+            "snappy_enabled": "true", 
+            "oozie_pid_dir": "/var/run/oozie", 
+            "datanode_principal_name": "dn/_HOST", 
+            "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab", 
+            "nagios_group": "nagios", 
+            "hcat_user": "hcat", 
+            "hadoop_heapsize": "1024", 
+            "hbase_regionserver_primary_name": "hbase", 
+            "zk_user": "zookeeper", 
+            "rrdcached_base_dir": "/var/lib/ganglia/rrds", 
+            "keytab_path": "/etc/security/keytabs", 
+            "hive_pid_dir": "/var/run/hive", 
+            "webhcat_server": "c6402.ambari.apache.org", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "hcat_log_dir": "/var/log/webhcat", 
+            "oozie_hostname": "c6402.ambari.apache.org", 
+            "tasktracker_principal_name": "tt/_HOST", 
+            "jobtracker_keytab": "/etc/security/keytabs/jt.service.keytab", 
+            "tasktracker_keytab": "/etc/security/keytabs/tt.service.keytab", 
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
+            "namenode_heapsize": "1024m", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
+            "nagios_server": "c6402.ambari.apache.org", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "lzo_enabled": "true", 
+            "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
+            "hive_jdbc_driver": "com.mysql.jdbc.Driver", 
+            "dfs_datanode_address": "1019", 
+            "namenode_opt_newsize": "200m", 
+            "initLimit": "10", 
+            "hive_database_type": "mysql", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "namenode_primary_name": "nn", 
+            "tickTime": "2000", 
+            "hive_metastore_principal_name": "hive/_HOST", 
+            "datanode_keytab": "/etc/security/keytabs/dn.service.keytab", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "oozie_http_principal_name": "HTTP/c6402.ambari.apache.org", 
+            "tasktracker_primary_name": "tt", 
+            "hadoop_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "gmetad_user": "nobody", 
+            "oozie_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "hive_metastore": "c6402.ambari.apache.org", 
+            "nagios_user": "nagios", 
+            "security_enabled": "true", 
+            "proxyuser_group": "users", 
+            "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/", 
+            "hbase_primary_name": "hbase", 
+            "oozie_http_primary_name": "HTTP", 
+            "dtnode_heapsize": "1024m", 
+            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM", 
+            "oozie_log_dir": "/var/log/oozie", 
+            "webhcat_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
+            "oozie_user": "oozie", 
+            "oozie_data_dir": "/hadoop/oozie/data", 
+            "oozie_primary_name": "oozie", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "zookeeper_primary_name": "zookeeper", 
+            "hbase_master_principal_name": "hbase/_HOST", 
+            "jtnode_heapsize": "1024m", 
+            "yarn_user": "yarn", 
+            "gmond_user": "nobody", 
+            "nagios_web_login": "nagiosadmin", 
+            "nagios_contact": "q@q.q", 
+            "snamenode_primary_name": "nn", 
+            "hdfs_user": "hdfs", 
+            "oozie_database_type": "derby", 
+            "webhcat_user": "hcat", 
+            "hive_hostname": "c6402.ambari.apache.org", 
+            "hbase_regionserver_principal_name": "hbase/_HOST", 
+            "hive_log_dir": "/var/log/hive", 
+            "smokeuser_principal_name": "ambari-qa", 
+            "mapred_user": "mapred", 
+            "smokeuser_primary_name": "ambari-qa", 
+            "jtnode_opt_maxnewsize": "200m", 
+            "hbase_master_primary_name": "hbase", 
+            "oozie_servername": "c6402.ambari.apache.org", 
+            "hdfs_primary_name": "hdfs", 
+            "hive_ambari_database": "MySQL", 
+            "rca_enabled": "true", 
+            "hadoop_http_primary_name": "HTTP", 
+            "webHCat_http_principal_name": "HTTP/c6402.ambari.apache.org", 
+            "mysql_connector_url": "${download_url}/mysql-connector-java-5.1.18.zip", 
+            "hive_metastore_port": "9083", 
+            "hbase_user": "hbase", 
+            "snamenode_principal_name": "nn/_HOST", 
+            "oozie_database": "New Derby Database", 
+            "hbase_log_dir": "/var/log/hbase", 
+            "user_group": "hadoop", 
+            "hive_user": "hive", 
+            "webHCat_http_primary_name": "HTTP", 
+            "nagios_web_password": "q", 
+            "smokeuser": "ambari-qa", 
+            "ganglia_conf_dir": "/etc/ganglia/hdp", 
+            "hbase_master_heapsize": "1024m", 
+            "kerberos_install_type": "MANUALLY_SET_KERBEROS", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "hive_aux_jars_path": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
+            "jtnode_opt_newsize": "200m", 
+            "hbase_regionserver_keytab": "/etc/security/keytabs/hbase.service.keytab", 
+            "hbase_principal_name": "hbase", 
+            "hdfs_principal_name": "hdfs"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
+            "ipc.server.max.response.size": "5242880", 
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.cluster.administrators": " hdfs", 
+            "ambari.dfs.datanode.http.port": "1022", 
+            "dfs.block.size": "134217728", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.hosts": "/etc/hadoop/conf/dfs.include", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.socket.write.timeout": "0", 
+            "ipc.server.read.threadpool.size": "5", 
+            "dfs.balance.bandwidthPerSec": "6250000", 
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.permissions.supergroup": "hdfs", 
+            "dfs.secondary.http.address": "c6402.ambari.apache.org:50090", 
+            "ambari.dfs.datanode.port": "1019", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.access.time.precision": "0", 
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
+            "dfs.https.address": "c6401.ambari.apache.org:50470", 
+            "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}", 
+            "dfs.data.dir": "/hadoop/hdfs/data", 
+            "dfs.secondary.https.port": "50490", 
+            "dfs.permissions": "true", 
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.block.local-path-access.user": "hbase", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.web.ugi": "gopher,gopher", 
+            "dfs.datanode.du.pct": "0.85f", 
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+            "dfs.http.address": "c6401.ambari.apache.org:50070", 
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.https.port": "50070", 
+            "dfs.replication.max": "50", 
+            "dfs.datanode.max.xcievers": "4096", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.safemode.threshold.pct": "1.0f", 
+            "dfs.umaskmode": "077"
+        }, 
+        "hbase-site": {
+            "hbase.client.keyvalue.maxsize": "10485760", 
+            "hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab", 
+            "hbase.hstore.compactionThreshold": "3", 
+            "hbase.zookeeper.property.clientPort": "2181", 
+            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
+            "hbase.regionserver.handler.count": "60", 
+            "dfs.client.read.shortcircuit": "true", 
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging", 
+            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
+            "hbase.master.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", 
+            "hbase.hregion.memstore.block.multiplier": "2", 
+            "hbase.hregion.memstore.flush.size": "134217728", 
+            "hbase.superuser": "hbase", 
+            "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController", 
+            "hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine", 
+            "hbase.hregion.max.filesize": "10737418240", 
+            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
+            "zookeeper.session.timeout": "60000", 
+            "hbase.tmp.dir": "/hadoop/hbase", 
+            "hbase.regionserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", 
+            "hfile.block.cache.size": "0.40", 
+            "hbase.security.authentication": "kerberos", 
+            "hbase.zookeeper.quorum": "c6401.ambari.apache.org", 
+            "zookeeper.znode.parent": "/hbase-secure", 
+            "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController", 
+            "hbase.hstore.blockingStoreFiles": "10", 
+            "hbase.hregion.majorcompaction": "86400000", 
+            "hbase.security.authorization": "true", 
+            "hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab", 
+            "hbase.cluster.distributed": "true", 
+            "hbase.hregion.memstore.mslab.enabled": "true", 
+            "hbase.client.scanner.caching": "100", 
+            "hbase.zookeeper.useMulti": "true"
+        }, 
+        "core-site": {
+            "fs.default.name": "hdfs://c6401.ambari.apache.org:8020", 
+            "hadoop.proxyuser.HTTP.groups": "users", 
+            "hadoop.proxyuser.HTTP.hosts": "c6402.ambari.apache.org", 
+            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
+            "fs.checkpoint.period": "21600", 
+            "hadoop.proxyuser.hcat.groups": "users", 
+            "fs.checkpoint.size": "67108864", 
+            "fs.trash.interval": "360", 
+            "hadoop.proxyuser.hive.groups": "users", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec", 
+            "hadoop.security.authentication": "kerberos", 
+            "fs.checkpoint.edits.dir": "${fs.checkpoint.dir}", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "io.compression.codec.lzo.class": "com.hadoop.compression.lzo.LzoCodec", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "webinterface.private.actions": "false", 
+            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
+            "hadoop.proxyuser.oozie.groups": "users", 
+            "hadoop.security.authorization": "true", 
+            "fs.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "RULE:[2:$1@$0](jt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](tt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nDEFAULT", 
+            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
+            "ipc.client.connection.maxidletime": "30000"
+        }, 
+        "hive-site": {
+            "hive.enforce.sorting": "true", 
+            "javax.jdo.option.ConnectionPassword": "q", 
+            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
+            "hive.optimize.bucketmapjoin.sortedmerge": "true", 
+            "fs.file.impl.disable.cache": "true", 
+            "hive.auto.convert.join.noconditionaltask": "true", 
+            "hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM", 
+            "hive.optimize.bucketmapjoin": "true", 
+            "hive.map.aggr": "true", 
+            "hive.security.authorization.enabled": "true", 
+            "hive.optimize.reducededuplication.min.reducer": "1", 
+            "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab", 
+            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
+            "hive.mapjoin.bucket.cache.size": "10000", 
+            "hive.auto.convert.join.noconditionaltask.size": "1000000000", 
+            "javax.jdo.option.ConnectionUserName": "hive", 
+            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", 
+            "hive.server2.authentication": "KERBEROS", 
+            "hive.metastore.sasl.enabled": "true", 
+            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
+            "hive.metastore.client.socket.timeout": "60", 
+            "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM", 
+            "hive.semantic.analyzer.factory.impl": "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory", 
+            "hive.auto.convert.join": "true", 
+            "hive.enforce.bucketing": "true", 
+            "hive.mapred.reduce.tasks.speculative.execution": "false", 
+            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
+            "hive.auto.convert.sortmerge.join": "true", 
+            "fs.hdfs.impl.disable.cache": "true", 
+            "hive.security.authorization.manager": "org.apache.hcatalog.security.HdfsAuthorizationProvider", 
+            "ambari.hive.db.schema.name": "hive", 
+            "hive.metastore.execute.setugi": "true", 
+            "hive.auto.convert.sortmerge.join.noconditionaltask": "true", 
+            "hive.server2.enable.doAs": "true", 
+            "hive.optimize.mapjoin.mapreduce": "true", 
+            "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab"
+        }
+    }, 
+    "configurationTags": {
+        "mapred-site": {
+            "tag": "version1389980437965"
+        }, 
+        "oozie-site": {
+            "tag": "version1389980437966"
+        }, 
+        "webhcat-site": {
+            "tag": "version1389980437965"
+        }, 
+        "global": {
+            "tag": "version1389980437965"
+        }, 
+        "hdfs-site": {
+            "tag": "version1389980437965"
+        }, 
+        "hbase-site": {
+            "tag": "version1389980437965"
+        }, 
+        "core-site": {
+            "tag": "version1389980437965"
+        }, 
+        "hive-site": {
+            "tag": "version1389980437965"
+        }
+    }, 
+    "commandId": "4-2", 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "ganglia_monitor_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "nagios_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hive_metastore_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670"
+        ], 
+        "mapred_tt_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "hbase_rs_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ganglia_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hbase_master_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "hive_mysql_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "oozie_server": [
+            "c6402.ambari.apache.org"
+        ], 
+        "webhcat_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "jtnode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hive_server_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}
\ No newline at end of file
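
Note on the JSON above: it is a test fixture, not a live cluster payload — RMFTestCase feeds it back to the stack scripts as the command configuration. A minimal sketch of how a test reads it, using only the helpers this patch already exercises (executeScript, getConfig); the hook path is taken from the tests below, and the existence of a secured.json fixture for that hook is my assumption, not the commit's:

    from stacks.utils.RMFTestCase import *

    class TestSecuredFixture(RMFTestCase):
      def test_fixture_round_trip(self):
        # RMFTestCase resolves config_file to a JSON fixture shaped like the one above.
        self.executeScript("1.3.2/hooks/before-START/scripts/hook.py",
                           classname="BeforeConfigureHook",
                           command="hook",
                           config_file="secured.json")
        # The serialized dictionaries come back through getConfig(), so
        # assertions can compare against them directly:
        hdfs_site = self.getConfig()['configurations']['hdfs-site']
        assert hdfs_site['dfs.namenode.kerberos.principal'] == 'nn/_HOST@EXAMPLE.COM'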

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
new file mode 100644
index 0000000..a0850a2
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from stacks.utils.RMFTestCase import *
+
+
+class TestHookBeforeInstall(RMFTestCase):
+  def test_hook_default(self):
+    self.executeScript("1.3.2/hooks/before-INSTALL/scripts/hook.py",
+                       classname="BeforeConfigureHook",
+                       command="hook",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Group', 'hadoop',)
+    self.assertResourceCalled('Group', 'users',)
+    self.assertResourceCalled('Group', 'users',)
+    self.assertResourceCalled('User', 'ambari-qa',
+                              gid='hadoop',
+                              groups=[u'users'],)
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content=StaticFile('changeToSecureUid.sh'),
+                              mode=0555,)
+    self.assertResourceCalled('Execute',
+                              '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 2>/dev/null',
+                              not_if='test $(id -u ambari-qa) -gt 1000',)
+    self.assertResourceCalled('User', 'hbase',
+                              gid='hadoop',
+                              groups=[u'hadoop'],)
+    self.assertResourceCalled('File', '/tmp/changeUid.sh',
+                              content=StaticFile('changeToSecureUid.sh'),
+                              mode=0555,)
+    self.assertResourceCalled('Execute',
+                              '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/hadoop/hbase 2>/dev/null',
+                              not_if='test $(id -u hbase) -gt 1000',)
+    self.assertResourceCalled('Group', 'nagios',)
+    self.assertResourceCalled('User', 'nagios', gid='nagios',)
+    self.assertResourceCalled('User', 'oozie', gid='hadoop',)
+    self.assertResourceCalled('User', 'hcat', gid='hadoop',)
+    self.assertResourceCalled('User', 'hcat', gid='hadoop',)
+    self.assertResourceCalled('User', 'hive',
+                              gid='hadoop',)
+    self.assertResourceCalled('Group', 'nobody',)
+    self.assertResourceCalled('Group', 'nobody',)
+    self.assertResourceCalled('User', 'nobody',
+                              gid='hadoop',
+                              groups=[u'nobody'],)
+    self.assertResourceCalled('User', 'nobody',
+                              gid='hadoop',
+                              groups=[u'nobody'],)
+    self.assertResourceCalled('User', 'hdfs',
+                              gid='hadoop',
+                              groups=[u'hadoop'],)
+    self.assertResourceCalled('User', 'mapred',
+                              gid='hadoop',
+                              groups=[u'hadoop'],)
+    self.assertResourceCalled('User', 'zookeeper',
+                              gid='hadoop',)
+    self.assertResourceCalled('Package', 'unzip',)
+    self.assertResourceCalled('Package', 'net-snmp',)
+    self.assertResourceCalled('Package', 'net-snmp-utils',)
+    self.assertNoMoreResources()
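
The not_if guards asserted above are what make these hooks idempotent: changeUid.sh only runs while the target user still has a system uid at or below 1000. A hedged sketch of the script-side declaration these assertions imply, assuming the resource_management Execute resource that this patch's scripts are tested against; illustrative, not the commit's exact code:

    from resource_management import *

    # Inferred from the assertion above.
    Execute('/tmp/changeUid.sh ambari-qa '
            '/tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,'
            '/tmp/ambari-qa,/tmp/sqoop-ambari-qa 2>/dev/null',
            # Skip the uid change once ambari-qa already has a uid above 1000.
            not_if='test $(id -u ambari-qa) -gt 1000')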

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
new file mode 100644
index 0000000..18a83b8
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from stacks.utils.RMFTestCase import *
+
+class TestHookBeforeStart(RMFTestCase):
+  def test_hook_default(self):
+    self.executeScript("1.3.2/hooks/before-START/scripts/hook.py",
+                       classname="BeforeConfigureHook",
+                       command="hook",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'mkdir -p /tmp/HDP-artifacts/ ; curl -kf --retry 10 http://c6401.ambari.apache.org:8080/resources//jdk-7u45-linux-x64.tar.gz -o /tmp/HDP-artifacts//jdk-7u45-linux-x64.tar.gz',
+                              not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
+                              path = ['/bin', '/usr/bin/'],
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /usr/jdk64 ; cd /usr/jdk64 ; tar -xf /tmp/HDP-artifacts//jdk-7u45-linux-x64.tar.gz > /dev/null 2>&1',
+                              not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
+                              path = ['/bin', '/usr/bin/'],
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /tmp/HDP-artifacts/; curl -kf --retry 10 http://c6401.ambari.apache.org:8080/resources//UnlimitedJCEPolicyJDK7.zip -o /tmp/HDP-artifacts//UnlimitedJCEPolicyJDK7.zip',
+                              not_if = 'test -e /tmp/HDP-artifacts//UnlimitedJCEPolicyJDK7.zip',
+                              ignore_failures = True,
+                              path = ['/bin', '/usr/bin/'],
+                              )
+    self.assertResourceCalled('File', '/etc/snmp/snmpd.conf',
+                              content = Template('snmpd.conf.j2'),
+                              )
+    self.assertResourceCalled('Service', 'snmpd',
+                              action = ['restart'],
+                              )
+    self.assertResourceCalled('Execute', '/bin/echo 0 > /selinux/enforce',
+                              only_if = 'test -f /selinux/enforce',
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /usr/lib/hadoop/lib/native/Linux-i386-32; ln -sf /usr/lib/libsnappy.so /usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /usr/lib/hadoop/lib/native/Linux-amd64-64; ln -sf /usr/lib64/libsnappy.so /usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
+                              )
+    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
+                              content = Template('hdfs.conf.j2'),
+                              owner = 'root',
+                              group = 'root',
+                              mode = 0644,
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
+                              content = Template('taskcontroller.cfg.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-env.sh',
+                              content = Template('hadoop-env.sh.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/commons-logging.properties',
+                              content = Template('commons-logging.properties.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
+                              content = Template('slaves.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/health_check',
+                              content = Template('health_check.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.driver=.*~ambari.jobhistory.driver=org.postgresql.Driver~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA=.*~log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.driver=.*~log4j.appender.JHA.driver=${ambari.jobhistory.driver}~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.database=.*~log4j.appender.JHA.database=${ambari.jobhistory.database}~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.logger=.*~ambari.jobhistory.logger=DEBUG,JHA~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.password=.*~log4j.appender.JHA.password=${ambari.jobhistory.password}~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.database=.*~ambari.jobhistory.database=jdbc:postgresql://c6401.ambari.apache.org/ambarirca~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=.*~log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=.*~log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.user=.*~log4j.appender.JHA.user=${ambari.jobhistory.user}~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.user=.*~ambari.jobhistory.user=mapred~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.password=.*~ambari.jobhistory.password=mapred~' /etc/hadoop/conf/log4j.properties",
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-metrics2.properties',
+                              content = Template('hadoop-metrics2.properties.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['core-site'],
+                              )
+    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
+                              owner = 'mapred',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['mapred-site'],
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/task-log4j.properties',
+                              content = StaticFile('task-log4j.properties'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['hdfs-site'],
+                              )
+    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/hadoop-tools.jar',
+                              to = '/usr/lib/hadoop/hadoop-tools.jar',
+                              )
+    self.assertNoMoreResources()
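
The XmlConfig assertions above pass the fixture dictionaries straight through, which is how the hook renders core-site.xml and hdfs-site.xml from the command JSON. A hedged sketch of the corresponding script-side call, with kwargs mirrored from these assertions; the config = Script.get_config() idiom is my assumption about the surrounding script, not shown in this hunk:

    from resource_management import *

    config = Script.get_config()  # command JSON, shaped like the fixture above

    # Illustrative only; kwargs mirror the XmlConfig assertion above.
    XmlConfig("core-site.xml",
              conf_dir="/etc/hadoop/conf",
              configurations=config['configurations']['core-site'],
              owner="hdfs",
              group="hadoop")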

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_monitor.py b/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_monitor.py
deleted file mode 100644
index 8fae96a..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/GANGLIA/test_ganglia_monitor.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from stacks.utils.RMFTestCase import *
-
-
-class TestGangliaMonitor(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py",
-                       classname="GangliaMonitor",
-                       command="configure",
-                       config_file="default.json"
-                      )
-    self.assertResourceCalled('Group', 'hadoop',
-                              )
-    self.assertResourceCalled('Group', 'nobody',
-                              )
-    self.assertResourceCalled('Group', 'nobody',
-                              )
-    self.assertResourceCalled('User', 'nobody',
-                              groups = [u'nobody'],
-                              )
-    self.assertResourceCalled('User', 'nobody',
-                              groups = [u'nobody'],
-                              )
-    self.assertResourceCalled('Directory', '/etc/ganglia/hdp',
-                              owner = 'root',
-                              group = 'hadoop',
-                              recursive = True,
-                              )
-    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
-                              owner = 'root',
-                              group = 'root',
-                              recursive = True,
-                              )
-    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
-                              content = StaticFile('gmetad.init'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
-                              content = StaticFile('gmond.init'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
-                              content = StaticFile('checkGmond.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
-                              content = StaticFile('checkRrdcached.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
-                              content = StaticFile('gmetadLib.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
-                              content = StaticFile('gmondLib.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
-                              content = StaticFile('rrdcachedLib.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
-                              content = StaticFile('setupGanglia.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
-                              content = StaticFile('startGmetad.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
-                              content = StaticFile('startGmond.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
-                              content = StaticFile('startRrdcached.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
-                              content = StaticFile('stopGmetad.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
-                              content = StaticFile('stopGmond.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
-                              content = StaticFile('stopRrdcached.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
-                              content = StaticFile('teardownGanglia.sh'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
-                              owner = 'root',
-                              template_tag = None,
-                              group = 'root',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
-                              owner = 'root',
-                              template_tag = None,
-                              group = 'root',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
-                              owner = 'root',
-                              template_tag = None,
-                              group = 'root',
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHistoryServer -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -o root -g hadoop',
-                              path = ['/usr/libexec/hdp/ganglia',
-                                      '/usr/sbin',
-                                      '/sbin:/usr/local/bin',
-                                      '/bin',
-                                      '/usr/bin'],
-                              )
-    self.assertResourceCalled('Directory', '/etc/ganglia/conf.d',
-                              owner = 'root',
-                              group = 'hadoop',
-                              )
-    self.assertResourceCalled('File', '/etc/ganglia/conf.d/modgstatus.conf',
-                              owner = 'root',
-                              group = 'hadoop',
-                              )
-    self.assertResourceCalled('File', '/etc/ganglia/conf.d/multicpu.conf',
-                              owner = 'root',
-                              group = 'hadoop',
-                              )
-    self.assertResourceCalled('File', '/etc/ganglia/gmond.conf',
-                              owner = 'root',
-                              group = 'hadoop',
-                              )
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py",
-                       classname="GangliaMonitor",
-                       command="start",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Execute', 'chkconfig gmond off',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
-    self.assertResourceCalled('Execute', 'service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py",
-                       classname="GangliaMonitor",
-                       command="stop",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Execute', 'service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
-                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                              )
-    self.assertNoMoreResources()
-


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_jobtracker.py b/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_jobtracker.py
deleted file mode 100644
index 80762f8..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_jobtracker.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestJobtracker(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "start",
-                       config_file="default.json"
-      )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker',
-                       user = 'mapred',
-                       not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
-    )
-    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1',
-                       user = 'mapred',
-                       initial_wait = 5,
-                       not_if= 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "stop",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop jobtracker',
-                              user = 'mapred'
-    )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid')
-    self.assertNoMoreResources()
-
-  def test_decommission_default(self):
-
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "decommission",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-                       owner = 'mapred',
-                       content = Template('exclude_hosts_list.j2'),
-                       group = 'hadoop',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'mradmin -refreshNodes',
-                       conf_dir = '/etc/hadoop/conf',
-                       kinit_override = True,
-                       user = 'mapred',
-    )
-    self.assertNoMoreResources()
-
-  def test_configure_secured(self):
-
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "start",
-                       config_file="secured.json"
-    )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker',
-                       user = 'mapred',
-                       not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
-    )
-    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1',
-                       user = 'mapred',
-                       initial_wait = 5,
-                       not_if= 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "stop",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop jobtracker',
-                       user = 'mapred'
-    )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid')
-    self.assertNoMoreResources()
-
-  def test_decommission_secured(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py",
-                       classname = "Jobtracker",
-                       command = "decommission",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-                       owner = 'mapred',
-                       content = Template('exclude_hosts_list.j2'),
-                       group = 'hadoop',
-    )
-
-    self.assertResourceCalled('ExecuteHadoop', 'mradmin -refreshNodes',
-                       conf_dir = '/etc/hadoop/conf',
-                       kinit_override = True,
-                       user = 'mapred',
-    )
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_service_check.py b/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_service_check.py
deleted file mode 100644
index 337c3c0..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_service_check.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestServiceCheck(RMFTestCase):
-
-  def test_service_check_default(self):
-
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/service_check.py",
-                        classname="ServiceCheck",
-                        command="service_check",
-                        config_file="default.json"
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput',
-                        try_sleep = 5,
-                        tries = 1,
-                        user = 'ambari-qa',
-                        conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'jar /usr/lib/hadoop//hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput',
-                        logoutput = True,
-                        try_sleep = 5,
-                        tries = 1,
-                        user = 'ambari-qa',
-                        conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e mapredsmokeoutput',
-                        user = 'ambari-qa',
-                        conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertNoMoreResources()
-
-  def test_service_check_secured(self):
-
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/service_check.py",
-                       classname="ServiceCheck",
-                       command="service_check",
-                       config_file="secured.json"
-    )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;',
-                       user = 'ambari-qa',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput',
-                       try_sleep = 5,
-                       tries = 1,
-                       user = 'ambari-qa',
-                       conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'jar /usr/lib/hadoop//hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput',
-                       logoutput = True,
-                       try_sleep = 5,
-                       tries = 1,
-                       user = 'ambari-qa',
-                       conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e mapredsmokeoutput',
-                       user = 'ambari-qa',
-                       conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_tasktracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_tasktracker.py b/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_tasktracker.py
deleted file mode 100644
index a36bf52..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/MAPREDUCE/test_mapreduce_tasktracker.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestTasktracker(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py",
-                       classname = "Tasktracker",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py",
-                         classname = "Tasktracker",
-                         command = "start",
-                         config_file="default.json"
-      )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker',
-                              user = 'mapred',
-                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
-    )
-    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1',
-                              user = 'mapred',
-                              initial_wait = 5,
-                              not_if= 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py",
-                       classname = "Tasktracker",
-                       command = "stop",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop tasktracker',
-                              user = 'mapred'
-    )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid')
-    self.assertNoMoreResources()
-
-
-  def test_configure_secured(self):
-
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py",
-                       classname = "Tasktracker",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py",
-                         classname = "Tasktracker",
-                         command = "start",
-                         config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker',
-                              user = 'mapred',
-                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
-    )
-    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1',
-                              user = 'mapred',
-                              initial_wait = 5,
-                              not_if= 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py",
-                       classname = "Tasktracker",
-                       command = "stop",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop tasktracker',
-                              user = 'mapred'
-    )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid')
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
-      owner = 'mapred',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/mapred',
-      owner = 'mapred',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
-      owner = 'mapred',
-      group = 'hadoop',
-    )
\ No newline at end of file

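Several Execute assertions above carry a not_if guard of the form "ls <pidfile> && ps `cat <pidfile>`", i.e. skip the start when a live pid is already recorded. A rough Python equivalent of that guard, as a sketch only (function name is illustrative; this is not the RMF implementation):

    import os
    import subprocess

    def daemon_is_running(pid_file):
        # Mirrors the shell guard: the pid file exists and the pid is alive.
        if not os.path.isfile(pid_file):
            return False
        pid = open(pid_file).read().strip()
        devnull = open(os.devnull, 'w')
        return subprocess.call(['ps', pid], stdout=devnull, stderr=devnull) == 0

    if not daemon_is_running('/var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid'):
        subprocess.check_call(
            'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && '
            '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker',
            shell=True)
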
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_service_check.py b/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_service_check.py
deleted file mode 100644
index 87bc826..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_service_check.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestSqoopServiceCheck(RMFTestCase):
-
-  def test_service_check_secured(self):
-    self.executeScript("1.3.3/services/SQOOP/package/scripts/service_check.py",
-                       classname = "SqoopServiceCheck",
-                       command = "service_check",
-                       config_file="secured.json")
-    self.assertResourceCalled('Execute', '/usr/bin/kinit  -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa',)
-    self.assertResourceCalled('Execute', 'sqoop version',
-                              logoutput = True,
-                              user = 'ambari-qa',)
-    self.assertNoMoreResources()
-
-  def test_service_check_default(self):
-    self.executeScript("1.3.3/services/SQOOP/package/scripts/service_check.py",
-                         classname = "SqoopServiceCheck",
-                         command = "service_check",
-                         config_file="default.json")
-    self.assertResourceCalled('Execute', 'sqoop version',
-                              logoutput = True,
-                              user = 'ambari-qa',)
-    self.assertNoMoreResources()
-
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_sqoop.py b/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_sqoop.py
deleted file mode 100644
index 92c0d17..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/SQOOP/test_sqoop.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestSqoop(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/SQOOP/package/scripts/sqoop_client.py",
-                       classname = "SqoopClient",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
-                              to = '/usr/share/java/mysql-connector-java.jar',)
-    self.assertResourceCalled('Directory', '/usr/lib/sqoop/conf',
-                              owner = 'sqoop',
-                              group = 'hadoop',)
-    self.assertResourceCalled('TemplateConfig', '/usr/lib/sqoop/conf/sqoop-env.sh',
-                              owner = 'sqoop',
-                              template_tag = None,)
-    self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env-template.sh',
-                              owner = 'sqoop',
-                              group = 'hadoop',)
-    self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site-template.xml',
-                              owner = 'sqoop',
-                              group = 'hadoop',)
-    self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site.xml',
-                              owner = 'sqoop',
-                              group = 'hadoop',)
-    self.assertNoMoreResources()
-
-
-
-

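The Link resource asserted above is an idempotent symlink: make /usr/lib/sqoop/lib/mysql-connector-java.jar point at the shared connector only when it is missing or wrong. Sketched directly (the helper name is illustrative):

    import os

    def ensure_link(link, target):
        # Already correct? Then do nothing, as the Link resource would.
        if os.path.islink(link) and os.readlink(link) == target:
            return
        if os.path.lexists(link):
            os.remove(link)
        os.symlink(target, link)

    ensure_link('/usr/lib/sqoop/lib/mysql-connector-java.jar',
                '/usr/share/java/mysql-connector-java.jar')
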
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_server.py b/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_server.py
deleted file mode 100644
index 048ec10..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_server.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, patch
-from stacks.utils.RMFTestCase import *
-
-class TestWebHCatServer(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "start",
-                       config_file="default.json"
-    )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh start',
-                              not_if = 'ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1',
-                              user = 'hcat'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "stop",
-                       config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh stop',
-                              user = 'hcat',
-                              )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/webhcat/webhcat.pid')
-    self.assertNoMoreResources()
-
-  def test_configure_secured(self):
-    self.executeScript("2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "start",
-                       config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh start',
-                              not_if = 'ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1',
-                              user = 'hcat'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py",
-                       classname = "WebHCatServer",
-                       command = "stop",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh stop',
-                              user = 'hcat',
-                              )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/webhcat/webhcat.pid')
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-    self.assertResourceCalled('Directory', '/var/run/webhcat',
-      owner = 'hcat',
-      group = 'hadoop',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/var/log/webhcat',
-      owner = 'hcat',
-      group = 'hadoop',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
-      owner = 'hcat',
-      group = 'hadoop',
-      conf_dir = '/etc/hcatalog/conf',
-      configurations = self.getConfig()['configurations']['webhcat-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('File', '/etc/hcatalog/conf/webhcat-env.sh',
-      content = Template('webhcat-env.sh.j2'),
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/lib/hadoop-mapreduce/hadoop-streaming*.jar /apps/webhcat/hadoop-streaming.jar',
-      not_if = ' hadoop fs -ls /apps/webhcat/hadoop-streaming.jar >/dev/null 2>&1',
-      user = 'hcat',
-      conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/pig.tar.gz /apps/webhcat/pig.tar.gz',
-      not_if = ' hadoop fs -ls /apps/webhcat/pig.tar.gz >/dev/null 2>&1',
-      user = 'hcat',
-      conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/hive.tar.gz /apps/webhcat/hive.tar.gz',
-      not_if = ' hadoop fs -ls /apps/webhcat/hive.tar.gz >/dev/null 2>&1',
-      user = 'hcat',
-      conf_dir = '/etc/hadoop/conf',
-    )
-
-  def assert_configure_secured(self):
-    self.assertResourceCalled('Directory', '/var/run/webhcat',
-      owner = 'hcat',
-      group = 'hadoop',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/var/log/webhcat',
-      owner = 'hcat',
-      group = 'hadoop',
-      recursive = True,
-      mode = 493,
-    )
-    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
-      owner = 'hcat',
-      group = 'hadoop',
-      conf_dir = '/etc/hcatalog/conf',
-      configurations = self.getConfig()['configurations']['webhcat-site'], # don't hardcode all the properties
-    )
-    self.assertResourceCalled('File', '/etc/hcatalog/conf/webhcat-env.sh',
-      content = Template('webhcat-env.sh.j2'),
-      owner = 'hcat',
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;',
-      path = ['/bin'],
-      user = 'hcat',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/lib/hadoop-mapreduce/hadoop-streaming*.jar /apps/webhcat/hadoop-streaming.jar',
-      not_if = '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; hadoop fs -ls /apps/webhcat/hadoop-streaming.jar >/dev/null 2>&1',
-      user = 'hcat',
-      conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/pig.tar.gz /apps/webhcat/pig.tar.gz',
-      not_if = ' hadoop fs -ls /apps/webhcat/pig.tar.gz >/dev/null 2>&1',
-      user = 'hcat',
-      conf_dir = '/etc/hadoop/conf',
-    )
-    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/hive.tar.gz /apps/webhcat/hive.tar.gz',
-      not_if = ' hadoop fs -ls /apps/webhcat/hive.tar.gz >/dev/null 2>&1',
-      user = 'hcat',
-      conf_dir = '/etc/hadoop/conf',
-    )

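The copyFromLocal assertions above all follow one pattern: upload an artifact to /apps/webhcat only when an "fs -ls" probe says it is absent, with the probe kinit-prefixed on secured clusters (the bare leading space in the default-mode not_if strings is that empty kinit slot). A hedged sketch of the same guard (helper name illustrative):

    import subprocess

    def copy_if_absent(local, hdfs_path, kinit_cmd=''):
        # kinit_cmd is empty on non-secured clusters, hence the leading
        # space seen in the default-mode not_if strings above.
        probe = '%s hadoop fs -ls %s >/dev/null 2>&1' % (kinit_cmd, hdfs_path)
        if subprocess.call(probe, shell=True) != 0:
            subprocess.check_call('hadoop fs -copyFromLocal %s %s' % (local, hdfs_path),
                                  shell=True)

    copy_if_absent('/usr/share/HDP-webhcat/pig.tar.gz', '/apps/webhcat/pig.tar.gz')
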
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_service_check.py b/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_service_check.py
deleted file mode 100644
index 87ff406..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/WEBHCAT/test_webhcat_service_check.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestServiceCheck(RMFTestCase):
-
-  def test_service_check_default(self):
-
-    self.executeScript("1.3.3/services/WEBHCAT/package/scripts/service_check.py",
-                       classname="WebHCatServiceCheck",
-                       command="service_check",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('File', '/tmp/templetonSmoke.sh',
-                       content = StaticFile('templetonSmoke.sh'),
-                       mode = 493,
-    )
-    self.assertResourceCalled('Execute', 'sh /tmp/templetonSmoke.sh c6402.ambari.apache.org ambari-qa no_keytab False /usr/bin/kinit',
-                       logoutput = True,
-                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                       tries = 3,
-                       try_sleep = 5,
-    )
-    self.assertNoMoreResources()
-
-  def test_service_check_secured(self):
-
-    self.executeScript("1.3.3/services/WEBHCAT/package/scripts/service_check.py",
-                       classname="WebHCatServiceCheck",
-                       command="service_check",
-                       config_file="secured.json"
-    )
-    self.assertResourceCalled('File', '/tmp/templetonSmoke.sh',
-                       content = StaticFile('templetonSmoke.sh'),
-                       mode = 493,
-    )
-    self.assertResourceCalled('Execute', 'sh /tmp/templetonSmoke.sh c6402.ambari.apache.org ambari-qa /etc/security/keytabs/smokeuser.headless.keytab True /usr/bin/kinit',
-                       logoutput = True,
-                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                       tries = 3,
-                       try_sleep = 5,
-    )
-    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_client.py
deleted file mode 100644
index 1100118..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_client.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestZookeeperClient(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py",
-                       classname = "ZookeeperClient",
-                       command = "configure",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
-      owner = 'zookeeper',
-      content = Template('zoo.cfg.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
-      owner = 'zookeeper',
-      content = Template('zookeeper-env.sh.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
-      owner = 'zookeeper',
-      content = Template('configuration.xsl.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('Directory', '/var/run/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
-      owner = 'zookeeper',
-      group = 'hadoop',
-    )
-    self.assertNoMoreResources()
-
-  def test_configure_secured(self):
-
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py",
-                       classname = "ZookeeperClient",
-                       command = "configure",
-                       config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
-      owner = 'zookeeper',
-      content = Template('zoo.cfg.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
-      owner = 'zookeeper',
-      content = Template('zookeeper-env.sh.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
-      owner = 'zookeeper',
-      content = Template('configuration.xsl.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('Directory', '/var/run/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper_client_jaas.conf',
-      owner = 'zookeeper',
-      content = Template('zookeeper_client_jaas.conf.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
-      owner = 'zookeeper',
-      group = 'hadoop',
-    )
-    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_server.py
deleted file mode 100644
index 79058ec..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_server.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, patch
-from stacks.utils.RMFTestCase import *
-
-class TestZookeeperServer(RMFTestCase):
-
-  def test_configure_default(self):
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                   classname = "ZookeeperServer",
-                   command = "configure",
-                   config_file="default.json"
-    )
-
-    self.assert_configure_default()
-    self.assertNoMoreResources()
-
-  def test_start_default(self):
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                   classname = "ZookeeperServer",
-                   command = "start",
-                   config_file="default.json"
-    )
-
-    self.assert_configure_default()
-    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh start',
-                    not_if = 'ls /var/run/zookeeper/zookeeper_server.pid >/dev/null 2>&1 && ps `cat /var/run/zookeeper/zookeeper_server.pid` >/dev/null 2>&1',
-                    user = 'zookeeper'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_default(self):
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "stop",
-                  config_file="default.json"
-    )
-
-    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh stop',
-      user = 'zookeeper',
-    )
-    self.assertResourceCalled('Execute', 'rm -f /var/run/zookeeper/zookeeper_server.pid')
-    self.assertNoMoreResources()
-
-  def test_configure_secured(self):
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "configure",
-                  config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertNoMoreResources()
-
-  def test_start_secured(self):
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "start",
-                  config_file="secured.json"
-    )
-
-    self.assert_configure_secured()
-    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh start',
-                  not_if = 'ls /var/run/zookeeper/zookeeper_server.pid >/dev/null 2>&1 && ps `cat /var/run/zookeeper/zookeeper_server.pid` >/dev/null 2>&1',
-                  user = 'zookeeper'
-    )
-    self.assertNoMoreResources()
-
-  def test_stop_secured(self):
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "stop",
-                  config_file="secured.json"
-    )
-
-    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh stop',
-                  user = 'zookeeper',
-    )
-
-    self.assertResourceCalled('Execute', 'rm -f /var/run/zookeeper/zookeeper_server.pid')
-    self.assertNoMoreResources()
-
-  def assert_configure_default(self):
-
-    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
-      owner = 'zookeeper',
-      content = Template('zoo.cfg.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
-      owner = 'zookeeper',
-      content = Template('zookeeper-env.sh.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
-      owner = 'zookeeper',
-      content = Template('configuration.xsl.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('Directory', '/var/run/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/hadoop/zookeeper/myid',
-      content = '1',
-      mode = 420,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
-      owner = 'zookeeper',
-      group = 'hadoop',
-    )
-
-  def assert_configure_secured(self):
-
-    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
-      owner = 'zookeeper',
-      content = Template('zoo.cfg.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
-      owner = 'zookeeper',
-      content = Template('zookeeper-env.sh.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
-      owner = 'zookeeper',
-      content = Template('configuration.xsl.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('Directory', '/var/run/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/var/log/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
-      owner = 'zookeeper',
-      group = 'hadoop',
-      recursive = True,
-    )
-    self.assertResourceCalled('File', '/hadoop/zookeeper/myid',
-      content = '1',
-      mode = 420,
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper_jaas.conf',
-      owner = 'zookeeper',
-      content = Template('zookeeper_jaas.conf.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper_client_jaas.conf',
-      owner = 'zookeeper',
-      content = Template('zookeeper_client_jaas.conf.j2'),
-      group = 'hadoop',
-    )
-    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
-      owner = 'zookeeper',
-      group = 'hadoop',
-    )
\ No newline at end of file

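The myid assertion above (content = '1', mode = 420) reflects a ZooKeeper requirement: each ensemble member stores its 1-based server id in <dataDir>/myid so it can locate its own server.N entry in zoo.cfg. As a sketch (helper name illustrative):

    import os

    def write_myid(data_dir, server_id):
        # 420 decimal == 0644 octal, i.e. rw-r--r--
        path = os.path.join(data_dir, 'myid')
        with open(path, 'w') as f:
            f.write(str(server_id))
        os.chmod(path, 0o644)

    write_myid('/hadoop/zookeeper', 1)
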
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_service_check.py b/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_service_check.py
deleted file mode 100644
index 8ba984c..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/ZOOKEEPER/test_zookeeper_service_check.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-from mock.mock import MagicMock, call, patch
-from stacks.utils.RMFTestCase import *
-
-class TestServiceCheck(RMFTestCase):
-
-  def test_service_check_default(self):
-
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/service_check.py",
-                       classname="ZookeeperServiceCheck",
-                       command="service_check",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('File', '/tmp/zkSmoke.sh',
-                       content = StaticFile('zkSmoke.sh'),
-                       mode = 493,
-    )
-    self.assertResourceCalled('Execute', 'sh /tmp/zkSmoke.sh /usr/lib/zookeeper/bin/zkCli.sh ambari-qa /etc/zookeeper/conf 2181 False /usr/bin/kinit no_keytab',
-                       logoutput = True,
-                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                       tries = 3,
-                       try_sleep = 5,
-    )
-    self.assertNoMoreResources()
-
-  def test_service_check_secured(self):
-
-    self.executeScript("1.3.3/services/ZOOKEEPER/package/scripts/service_check.py",
-                       classname="ZookeeperServiceCheck",
-                       command="service_check",
-                       config_file="secured.json"
-    )
-    self.assertResourceCalled('File', '/tmp/zkSmoke.sh',
-                       content = StaticFile('zkSmoke.sh'),
-                       mode = 493,
-    )
-    self.assertResourceCalled('Execute', 'sh /tmp/zkSmoke.sh /usr/lib/zookeeper/bin/zkCli.sh ambari-qa /etc/zookeeper/conf 2181 True /usr/bin/kinit /etc/security/keytabs/smokeuser.headless.keytab',
-                       logoutput = True,
-                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
-                       tries = 3,
-                       try_sleep = 5,
-    )
-    self.assertNoMoreResources()

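A recurring detail in these assertions: file modes are expressed in decimal, so mode = 493 is octal 0755 (rwxr-xr-x, as for /tmp/zkSmoke.sh above) and mode = 420 is octal 0644 (rw-r--r--). A quick check:

    print(oct(493))  # '0o755' on Python 3 ('0755' on Python 2)
    print(oct(420))  # '0o644'
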
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/configs/default.json b/ambari-server/src/test/python/stacks/1.3.3/configs/default.json
deleted file mode 100644
index 6d12470..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/configs/default.json
+++ /dev/null
@@ -1,444 +0,0 @@
-{
-    "roleCommand": "INSTALL", 
-    "clusterName": "cl1", 
-    "hostname": "c6402.ambari.apache.org", 
-    "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "ambari_db_rca_password": "mapred", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-1.3.4\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\"}]", 
-        "package_list": "[{\"type\":\"rpm\",\"name\":\"lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop\"},{\"type\":\"rpm\",\"name\":\"hadoop-libhdfs\"},{\"type\":\"rpm\",\"name\":\"hadoop-native\"},{\"type\":\"rpm\",\"name\":\"hadoop-pipes\"},{\"type\":\"rpm\",\"name\":\"hadoop-sbin\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo-native\"},{\"type\":\"rpm\",\"name\":\"snappy\"},{\"type\":\"rpm\",\"name\":\"snappy-devel\"},{\"type\":\"rpm\",\"name\":\"ambari-log4j\"}]", 
-        "stack_version": "1.3.4", 
-        "stack_name": "HDP", 
-        "db_name": "ambari", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "jdk_name": "jdk-7u45-linux-x64.tar.gz", 
-        "ambari_db_rca_username": "mapred", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
-    }, 
-    "commandType": "EXECUTION_COMMAND", 
-    "roleParams": {}, 
-    "serviceName": "HDFS", 
-    "role": "DATANODE", 
-    "commandParams": {
-        "command_timeout": "600", 
-        "service_package_folder": "HDFS",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
-        "script": "scripts/datanode.py",
-        "excluded_hosts": "host1,host2"
-    }, 
-    "taskId": 18, 
-    "public_hostname": "c6402.ambari.apache.org", 
-    "configurations": {
-        "mapred-site": {
-            "ambari.mapred.child.java.opts.memory": "768", 
-            "mapred.job.reduce.input.buffer.percent": "0.0", 
-            "mapred.job.map.memory.mb": "1536", 
-            "mapred.output.compression.type": "BLOCK", 
-            "mapred.jobtracker.maxtasks.per.job": "-1", 
-            "mapred.hosts": "/etc/hadoop/conf/mapred.include", 
-            "mapred.map.output.compression.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
-            "mapred.child.root.logger": "INFO,TLA", 
-            "mapred.tasktracker.tasks.sleeptime-before-sigkill": "250", 
-            "io.sort.spill.percent": "0.9", 
-            "mapred.reduce.parallel.copies": "30", 
-            "mapred.userlog.retain.hours": "24", 
-            "mapred.reduce.tasks.speculative.execution": "false", 
-            "io.sort.mb": "200", 
-            "mapreduce.cluster.administrators": " hadoop", 
-            "mapred.jobtracker.blacklist.fault-timeout-window": "180", 
-            "mapred.job.tracker.history.completed.location": "/mapred/history/done", 
-            "mapred.job.shuffle.input.buffer.percent": "0.7", 
-            "io.sort.record.percent": ".2", 
-            "mapred.cluster.max.reduce.memory.mb": "4096", 
-            "mapred.job.reuse.jvm.num.tasks": "1", 
-            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
-            "mapred.job.tracker.http.address": "c6402.ambari.apache.org:50030", 
-            "mapred.job.tracker.persist.jobstatus.hours": "1", 
-            "mapred.healthChecker.script.path": "/etc/hadoop/conf/health_check", 
-            "mapreduce.jobtracker.staging.root.dir": "/user", 
-            "mapred.job.shuffle.merge.percent": "0.66", 
-            "mapred.cluster.reduce.memory.mb": "2048", 
-            "mapred.job.tracker.persist.jobstatus.dir": "/mapred/jobstatus", 
-            "mapreduce.tasktracker.group": "hadoop", 
-            "mapred.tasktracker.map.tasks.maximum": "4", 
-            "mapred.child.java.opts": "-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true", 
-            "mapred.jobtracker.retirejob.check": "10000", 
-            "mapred.job.tracker": "c6402.ambari.apache.org:50300", 
-            "mapreduce.history.server.embedded": "false", 
-            "io.sort.factor": "100", 
-            "hadoop.job.history.user.location": "none", 
-            "mapreduce.reduce.input.limit": "10737418240", 
-            "mapred.reduce.slowstart.completed.maps": "0.05", 
-            "mapred.cluster.max.map.memory.mb": "6144", 
-            "mapreduce.history.server.http.address": "c6402.ambari.apache.org:51111", 
-            "mapred.jobtracker.taskScheduler": "org.apache.hadoop.mapred.CapacityTaskScheduler", 
-            "mapred.max.tracker.blacklists": "16", 
-            "mapred.local.dir": "/hadoop/mapred", 
-            "mapred.healthChecker.interval": "135000", 
-            "mapred.jobtracker.restart.recover": "false", 
-            "mapred.jobtracker.blacklist.fault-bucket-width": "15", 
-            "mapred.jobtracker.retirejob.interval": "21600000", 
-            "tasktracker.http.threads": "50", 
-            "mapred.job.tracker.persist.jobstatus.active": "false", 
-            "mapred.system.dir": "/mapred/system", 
-            "mapred.tasktracker.reduce.tasks.maximum": "2", 
-            "mapred.cluster.map.memory.mb": "1536", 
-            "mapred.hosts.exclude": "/etc/hadoop/conf/mapred.exclude", 
-            "mapred.queue.names": "default", 
-            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
-            "mapreduce.fileoutputcommitter.marksuccessfuljobs": "false", 
-            "mapred.job.reduce.memory.mb": "2048", 
-            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
-            "mapred.healthChecker.script.timeout": "60000", 
-            "jetty.connector": "org.mortbay.jetty.nio.SelectChannelConnector", 
-            "mapreduce.jobtracker.split.metainfo.maxsize": "50000000", 
-            "mapred.job.tracker.handler.count": "50", 
-            "mapred.inmem.merge.threshold": "1000", 
-            "mapred.task.tracker.task-controller": "org.apache.hadoop.mapred.DefaultTaskController", 
-            "mapred.jobtracker.completeuserjobs.maximum": "0", 
-            "mapred.task.timeout": "600000", 
-            "mapred.map.tasks.speculative.execution": "false"
-        }, 
-        "oozie-site": {
-            "oozie.service.PurgeService.purge.interval": "3600", 
-            "oozie.service.CallableQueueService.queue.size": "1000", 
-            "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd", 
-            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", 
-            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ", 
-            "use.system.libpath.for.mapreduce.and.pig.jobs": "false", 
-            "oozie.service.JPAService.create.db.schema": "false", 
-            "oozie.authentication.kerberos.name.rules": "DEFAULT", 
-            "oozie.service.ActionService.executor.ext.classes": "org.apache.oozie.action.email.EmailActionExecutor,\norg.apache.oozie.action.hadoop.HiveActionExecutor,\norg.apache.oozie.action.hadoop.ShellActionExecutor,\norg.apache.oozie.action.hadoop.SqoopActionExecutor,\norg.apache.oozie.action.hadoop.DistcpActionExecutor", 
-            "oozie.service.AuthorizationService.authorization.enabled": "true", 
-            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie", 
-            "oozie.service.JPAService.jdbc.password": "q", 
-            "oozie.service.coord.normal.default.timeout": "120", 
-            "oozie.service.JPAService.pool.max.active.conn": "10", 
-            "oozie.service.PurgeService.older.than": "30", 
-            "oozie.db.schema.name": "oozie", 
-            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", 
-            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ", 
-            "oozie.service.CallableQueueService.callable.concurrency": "3", 
-            "oozie.service.JPAService.jdbc.username": "oozie", 
-            "oozie.service.CallableQueueService.threads": "10", 
-            "oozie.systemmode": "NORMAL", 
-            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib", 
-            "oozie.authentication.type": "simple", 
-            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", 
-            "oozie.system.id": "oozie-${user.name}"
-        }, 
-        "webhcat-site": {
-            "templeton.pig.path": "pig.tar.gz/pig/bin/pig", 
-            "templeton.exec.timeout": "60000", 
-            "templeton.override.enabled": "false", 
-            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", 
-            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181", 
-            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse", 
-            "templeton.storage.class": "org.apache.hcatalog.templeton.tool.ZooKeeperStorage", 
-            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz", 
-            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar", 
-            "templeton.port": "50111", 
-            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar", 
-            "templeton.hadoop": "/usr/bin/hadoop", 
-            "templeton.hive.path": "hive.tar.gz/hive/bin/hive", 
-            "templeton.hadoop.conf.dir": "/etc/hadoop/conf", 
-            "templeton.hcat": "/usr/bin/hcat", 
-            "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz"
-        }, 
-        "global": {
-            "security_enabled": "false", 
-            "hbase_pid_dir": "/var/run/hbase", 
-            "proxyuser_group": "users", 
-            "zk_user": "zookeeper", 
-            "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/", 
-            "rrdcached_base_dir": "/var/lib/ganglia/rrds", 
-            "syncLimit": "5", 
-            "oozie_pid_dir": "/var/run/oozie", 
-            "hbase_regionserver_heapsize": "1024m", 
-            "dtnode_heapsize": "1024m", 
-            "jtnode_heapsize": "1024m", 
-            "hcat_log_dir": "/var/log/webhcat", 
-            "oozie_hostname": "c6402.ambari.apache.org", 
-            "hive_aux_jars_path": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
-            "tickTime": "2000", 
-            "hive_ambari_database": "MySQL", 
-            "rca_enabled": "true", 
-            "namenode_heapsize": "1024m", 
-            "oozie_log_dir": "/var/log/oozie", 
-            "hive_jdbc_driver": "com.mysql.jdbc.Driver", 
-            "oozie_user": "oozie", 
-            "oozie_data_dir": "/hadoop/oozie/data", 
-            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
-            "lzo_enabled": "true", 
-            "namenode_opt_maxnewsize": "200m", 
-            "smokeuser": "ambari-qa", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "hive_hostname": "c6402.ambari.apache.org", 
-            "hive_metastore_port": "9083", 
-            "hbase_master_heapsize": "1024m", 
-            "zk_data_dir": "/hadoop/zookeeper", 
-            "hcat_pid_dir": "/etc/run/webhcat", 
-            "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
-            "initLimit": "10", 
-            "hive_database_type": "mysql", 
-            "oozie_database": "New Derby Database", 
-            "zk_pid_dir": "/var/run/zookeeper", 
-            "user_group": "hadoop", 
-            "hive_user": "hive", 
-            "gmond_user": "nobody", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "q@q.q", 
-            "hive_database": "New MySQL Database", 
-            "nagios_web_password": "q", 
-            "clientPort": "2181", 
-            "oozie_derby_database": "Derby", 
-            "snappy_enabled": "true", 
-            "ganglia_conf_dir": "/etc/ganglia/hdp", 
-            "hdfs_user": "hdfs", 
-            "hbase_user": "hbase", 
-            "oozie_database_type": "derby", 
-            "webhcat_user": "hcat", 
-            "zk_log_dir": "/var/log/zookeeper", 
-            "jtnode_opt_maxnewsize": "200m", 
-            "mysql_connector_url": "${download_url}/mysql-connector-java-5.1.18.zip", 
-            "gmetad_user": "nobody", 
-            "hive_log_dir": "/var/log/hive", 
-            "jtnode_opt_newsize": "200m", 
-            "namenode_opt_newsize": "200m", 
-            "mapred_user": "mapred", 
-            "nagios_group": "nagios", 
-            "hive_pid_dir": "/var/run/hive", 
-            "hcat_user": "hcat", 
-            "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "nagios_user": "nagios", 
-            "hbase_log_dir": "/var/log/hbase"
-        }, 
-        "hdfs-site": {
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.access.time.precision": "0", 
-            "ipc.server.max.response.size": "5242880", 
-            "dfs.web.ugi": "gopher,gopher", 
-            "dfs.support.append": "true", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.replication": "3", 
-            "ambari.dfs.datanode.http.port": "50075", 
-            "dfs.block.size": "134217728", 
-            "dfs.data.dir": "/hadoop/hdfs/data", 
-            "dfs.datanode.du.reserved": "1073741824", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.namenode.handler.count": "100", 
-            "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}", 
-            "dfs.datanode.socket.write.timeout": "0", 
-            "ipc.server.read.threadpool.size": "5", 
-            "dfs.balance.bandwidthPerSec": "6250000", 
-            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.permissions.supergroup": "hdfs", 
-            "dfs.https.address": "c6401.ambari.apache.org:50470", 
-            "ambari.dfs.datanode.port": "50010", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.name.dir": "/hadoop/hdfs/namenode", 
-            "dfs.hosts": "/etc/hadoop/conf/dfs.include", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.secondary.https.port": "50490", 
-            "dfs.permissions": "true", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.block.local-path-access.user": "hbase", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.secondary.http.address": "c6402.ambari.apache.org:50090", 
-            "dfs.http.address": "c6401.ambari.apache.org:50070", 
-            "dfs.https.port": "50070", 
-            "dfs.replication.max": "50", 
-            "dfs.datanode.max.xcievers": "4096", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.du.pct": "0.85f", 
-            "dfs.safemode.threshold.pct": "1.0f", 
-            "dfs.umaskmode": "077"
-        }, 
-        "hbase-site": {
-            "hbase.client.keyvalue.maxsize": "10485760", 
-            "hbase.hstore.compactionThreshold": "3", 
-            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
-            "hbase.regionserver.handler.count": "60", 
-            "dfs.client.read.shortcircuit": "true", 
-            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
-            "hbase.hregion.memstore.block.multiplier": "2", 
-            "hbase.hregion.memstore.flush.size": "134217728", 
-            "hbase.superuser": "hbase", 
-            "hbase.zookeeper.property.clientPort": "2181", 
-            "hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.WritableRpcEngine", 
-            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
-            "zookeeper.session.timeout": "60000", 
-            "hbase.tmp.dir": "/hadoop/hbase", 
-            "hbase.hregion.max.filesize": "10737418240", 
-            "hfile.block.cache.size": "0.40", 
-            "hbase.security.authentication": "simple", 
-            "hbase.zookeeper.quorum": "c6401.ambari.apache.org", 
-            "zookeeper.znode.parent": "/hbase-unsecure", 
-            "hbase.hstore.blockingStoreFiles": "10", 
-            "hbase.hregion.majorcompaction": "86400000", 
-            "hbase.security.authorization": "false", 
-            "hbase.cluster.distributed": "true", 
-            "hbase.hregion.memstore.mslab.enabled": "true", 
-            "hbase.client.scanner.caching": "100", 
-            "hbase.zookeeper.useMulti": "true"
-        }, 
-        "core-site": {
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.proxyuser.hcat.groups": "users", 
-            "fs.checkpoint.size": "67108864", 
-            "hadoop.proxyuser.oozie.groups": "users", 
-            "fs.default.name": "hdfs://c6401.ambari.apache.org:8020", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.proxyuser.hive.groups": "users", 
-            "webinterface.private.actions": "false", 
-            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "hadoop.security.authentication": "simple", 
-            "fs.checkpoint.edits.dir": "${fs.checkpoint.dir}", 
-            "fs.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
-            "fs.trash.interval": "360", 
-            "ipc.client.idlethreshold": "8000", 
-            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
-            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
-            "io.compression.codec.lzo.class": "com.hadoop.compression.lzo.LzoCodec", 
-            "fs.checkpoint.period": "21600", 
-            "ipc.client.connection.maxidletime": "30000", 
-            "ipc.client.connect.max.retries": "50"
-        }, 
-        "hive-site": {
-            "hive.enforce.sorting": "true", 
-            "javax.jdo.option.ConnectionPassword": "q", 
-            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
-            "hive.optimize.bucketmapjoin.sortedmerge": "true", 
-            "fs.file.impl.disable.cache": "true", 
-            "hive.auto.convert.join.noconditionaltask": "true", 
-            "hive.map.aggr": "true", 
-            "hive.security.authorization.enabled": "false", 
-            "hive.optimize.reducededuplication.min.reducer": "1", 
-            "hive.optimize.bucketmapjoin": "true", 
-            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
-            "hive.mapjoin.bucket.cache.size": "10000", 
-            "hive.auto.convert.join.noconditionaltask.size": "1000000000", 
-            "javax.jdo.option.ConnectionUserName": "hive", 
-            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", 
-            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
-            "hive.metastore.client.socket.timeout": "60", 
-            "hive.semantic.analyzer.factory.impl": "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory", 
-            "hive.auto.convert.join": "true", 
-            "hive.enforce.bucketing": "true", 
-            "hive.mapred.reduce.tasks.speculative.execution": "false", 
-            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
-            "hive.auto.convert.sortmerge.join": "true", 
-            "fs.hdfs.impl.disable.cache": "true", 
-            "hive.security.authorization.manager": "org.apache.hcatalog.security.HdfsAuthorizationProvider", 
-            "ambari.hive.db.schema.name": "hive", 
-            "hive.metastore.execute.setugi": "true", 
-            "hive.auto.convert.sortmerge.join.noconditionaltask": "true", 
-            "hive.server2.enable.doAs": "true", 
-            "hive.optimize.mapjoin.mapreduce": "true"
-        }
-    }, 
-    "configurationTags": {
-        "mapred-site": {
-            "tag": "version1"
-        }, 
-        "oozie-site": {
-            "tag": "version1"
-        }, 
-        "webhcat-site": {
-            "tag": "version1"
-        }, 
-        "global": {
-            "tag": "version1"
-        }, 
-        "hdfs-site": {
-            "tag": "version1"
-        }, 
-        "hbase-site": {
-            "tag": "version1"
-        }, 
-        "core-site": {
-            "tag": "version1"
-        }, 
-        "hive-site": {
-            "tag": "version1"
-        }
-    }, 
-    "commandId": "1-1", 
-    "clusterHostInfo": {
-        "snamenode_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "ganglia_monitor_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hive_metastore_hosts": [
-            "c6402.ambari.apache.org"
-        ], 
-        "all_ping_ports": [
-            "8670", 
-            "8670"
-        ], 
-        "mapred_tt_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "all_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "hbase_rs_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "slave_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "namenode_host": [
-            "c6401.ambari.apache.org"
-        ], 
-        "ganglia_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hbase_master_hosts": [
-            "c6401.ambari.apache.org"
-        ], 
-        "hive_mysql_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "oozie_server": [
-            "c6402.ambari.apache.org"
-        ], 
-        "webhcat_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "jtnode_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "zookeeper_hosts": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hs_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hive_server_host": [
-            "c6402.ambari.apache.org"
-        ]
-    }
-}
\ No newline at end of file


[38/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index 8180689..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def configure(self, env):
-    import params
-
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index b033185..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-import os
-
-def datanode(action=None):
-  import params
-
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0750,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    for data_dir in params.dfs_data_dir.split(","):
-      Directory(os.path.dirname(data_dir),
-                recursive=True,
-                mode=0755)
-      Directory(data_dir,
-                recursive=False,
-                mode=0750,
-                owner=params.hdfs_user,
-                group=params.user_group)
-
-  if action == "start":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )
-  if action == "stop":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index d8e191f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,192 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-import urlparse
-
-
-def namenode(action=None, format=True):
-  import params
-
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if format:
-      format_namenode()
-      pass
-    service(
-      action="start", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      create_pid_dir=True,
-      create_log_dir=True,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-    # TODO: extract creation of these dirs into separate services
-    create_app_directories()
-    create_user_directories()
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-
-def create_app_directories():
-  import params
-
-  hdfs_directory(name="/tmp",
-                 owner=params.hdfs_user,
-                 mode="777"
-  )
-  #mapred directories
-  if params.has_jobtracker:
-    hdfs_directory(name="/mapred",
-                   owner=params.mapred_user
-    )
-    hdfs_directory(name="/mapred/system",
-                   owner=params.mapred_user
-    )
-    #hbase directories
-  if len(params.hbase_master_hosts) != 0:
-    hdfs_directory(name=params.hbase_hdfs_root_dir,
-                   owner=params.hbase_user
-    )
-    hdfs_directory(name=params.hbase_staging_dir,
-                   owner=params.hbase_user,
-                   mode="711"
-    )
-    #hive directories
-  if len(params.hive_server_host) != 0:
-    hdfs_directory(name=params.hive_apps_whs_dir,
-                   owner=params.hive_user,
-                   mode="777"
-    )
-  if len(params.hcat_server_hosts) != 0:
-    hdfs_directory(name=params.webhcat_apps_dir,
-                   owner=params.webhcat_user,
-                   mode="755"
-    )
-  if len(params.hs_host) != 0:
-    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-  pass
-
-
-def create_user_directories():
-  import params
-
-  hdfs_directory(name=params.smoke_hdfs_user_dir,
-                 owner=params.smoke_user,
-                 mode=params.smoke_hdfs_user_mode
-  )
-
-  if params.has_hive_server_host:
-    hdfs_directory(name=params.hive_hdfs_user_dir,
-                   owner=params.hive_user,
-                   mode=params.hive_hdfs_user_mode
-    )
-
-  if params.has_hcat_server_host:
-    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-      hdfs_directory(name=params.hcat_hdfs_user_dir,
-                     owner=params.hcat_user,
-                     mode=params.hcat_hdfs_user_mode
-      )
-    hdfs_directory(name=params.webhcat_hdfs_user_dir,
-                   owner=params.webhcat_user,
-                   mode=params.webhcat_hdfs_user_mode
-    )
-
-  if params.has_oozie_server:
-    hdfs_directory(name=params.oozie_hdfs_user_dir,
-                   owner=params.oozie_user,
-                   mode=params.oozie_hdfs_user_mode
-    )
-
-
-def format_namenode(force=None):
-  import params
-
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
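-  # The marker dir records a completed format; checkForFormat.sh guards the
-  # non-forced path against reformatting existing name dirs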
-  if force:
-    ExecuteHadoop('namenode -format',
-                  kinit_override=True)
-  else:
-    File('/tmp/checkForFormat.sh',
-         content=StaticFile("checkForFormat.sh"),
-         mode=0755)
-    Execute(format(
-      "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
-      "{dfs_name_dir}"),
-            not_if=format("test -d {mark_dir}"),
-            path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
-  Execute(format("mkdir -p {mark_dir}"))
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=params.user_group
-  )
-
-  ExecuteHadoop('dfsadmin -refreshNodes',
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index a943455..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-  elif action == "start":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )
-  elif action == "stop":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index 80700c8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_namenode import namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    namenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="configure")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-
-if __name__ == "__main__":
-  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py
deleted file mode 100644
index e172501..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-config = Script.get_config()
-
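-# Command prefix that lifts the core-file size limit before daemons start;
-# left empty on Oracle Linux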
-if System.get_instance().os_type == "oraclelinux":
-  ulimit_cmd = ''
-else:
-  ulimit_cmd = "ulimit -c unlimited && if [ `ulimit -c` != 'unlimited' ]; then exit 77; fi && "
-
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(nagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = status_params.hdfs_user
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group = "users"
-
-#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/bin"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-
-dfs_domain_socket_path = "/var/lib/hadoop-hdfs/dn_socket"
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
-
-# if stack_version[0] == "2":
-#dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-# else:
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.name.dir']#","/tmp/hadoop-hdfs/dfs/name")
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
-hbase_staging_dir = "/apps/hbase/staging"
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
-webhcat_apps_dir = "/apps/webhcat"
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
-
-if has_oozie_server:
-  oozie_hdfs_user_dir = format("/user/{oozie_user}")
-  oozie_hdfs_user_mode = 775
-if has_hcat_server_host:
-  hcat_hdfs_user_dir = format("/user/{hcat_user}")
-  hcat_hdfs_user_mode = 755
-  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-  webhcat_hdfs_user_mode = 755
-if has_hive_server_host:
-  hive_hdfs_user_dir = format("/user/{hive_user}")
-  hive_hdfs_user_mode = 700
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 770
-
-namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-
-# if stack_version[0] == "2":
-#fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
-# else:
-fs_checkpoint_dir = config['configurations']['core-site']['fs.checkpoint.dir']#","/tmp/hadoop-hdfs/dfs/namesecondary")
-
-# if stack_version[0] == "2":
-#dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
-# else:
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
-
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index 5cd264b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    #cleanup put below to handle retries; if retrying, there will be a stale file
-    #that needs cleanup; the exit code is that of the second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=15,
-                  tries=20
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName))
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-    if params.has_zkfc_hosts:
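-      # Verify a live ZKFC process through its pid file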
-      pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-      pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-      check_zkfc_process_cmd = format(
-        "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-      Execute(check_zkfc_process_cmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index b2a3bd1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env)
-
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.configure(env)
-    snamenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 4097373..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index a67d3b2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,133 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def service(action=None, name=None, user=None, create_pid_dir=False,
-            create_log_dir=False, keytab=None, principal=None):
-  import params
-
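-  # No-op placeholder; swapped for a real kinit below when security is enabled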
-  kinit_cmd = "true"
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  hadoop_daemon = format(
-    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  if params.security_enabled:
-    principal_replaced = principal.replace("_HOST", params.hostname)
-    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
-
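-    # A secure datanode has to be started as root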
-    if name == "datanode":
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-  daemon_cmd = format("{cmd} {action} {name}")
-
-  service_is_up = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
-
-  Execute(kinit_cmd)
-  Execute(daemon_cmd,
-          user = user,
-          not_if=service_is_up
-  )
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-         ignore_failures=True
-    )
-
-
-def hdfs_directory(name=None, owner=None, group=None,
-                   mode=None, recursive_chown=False, recursive_chmod=False):
-  import params
-
-  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
-  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
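-  # A local stub file records every HDFS dir already created, letting reruns
-  # skip the hadoop round-trips for dirs that are known to exist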
-  stub_dir = params.namenode_dirs_created_stub_dir
-  stub_filename = params.namenode_dirs_stub_filename
-  dir_absent_in_stub = format(
-    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
-  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
-  tries = 30
-  try_sleep = 10
-  dfs_check_nn_status_cmd = "true"
-
-  #if params.stack_version[0] == "2":
-  #mkdir_cmd = format("fs -mkdir -p {name}")
-  #else:
-  mkdir_cmd = format("fs -mkdir {name}")
-
-  if params.security_enabled:
-    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
-            user = params.hdfs_user)
-  ExecuteHadoop(mkdir_cmd,
-                try_sleep=try_sleep,
-                tries=tries,
-                not_if=format(
-                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "{dir_exists} && ! {namenode_safe_mode_off}"),
-                only_if=format(
-                  "su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "! {dir_exists}'"),
-                conf_dir=params.hadoop_conf_dir,
-                user=params.hdfs_user
-  )
-  Execute(record_dir_in_stub,
-          user=params.hdfs_user,
-          only_if=format("! {dir_absent_in_stub}")
-  )
-
-  recursive = "-R" if recursive_chown else ""
-  perm_cmds = []
-
-  if owner:
-    chown = owner
-    if group:
-      chown = format("{owner}:{group}")
-    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
-  if mode:
-    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
-  for cmd in perm_cmds:
-    ExecuteHadoop(cmd,
-                  user=params.hdfs_user,
-                  only_if=format("su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}'"),
-                  try_sleep=try_sleep,
-                  tries=tries,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index c3af46e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml
deleted file mode 100644
index ae7f586..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml
+++ /dev/null
@@ -1,148 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hivemetastore_host</name>
-    <value></value>
-    <description>Hive Metastore host.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>  
-  <property>
-    <name>hive_ambari_host</name>
-    <value></value>
-    <description>Database hostname.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value></value>
-    <description>Database name.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_name</name>
-    <value>hive</value>
-    <description>Database username to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_port</name>
-    <value>9083</value>
-    <description>Hive Metastore port.</description>
-  </property>    
-  <property>
-    <name>hive_lib</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive Library.</description>
-  </property>    
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>      
-  <property>
-    <name>hive_conf_dir</name>
-    <value>/etc/hive/conf</value>
-    <description>Hive Conf Dir.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>mysql_connector_url</name>
-    <value>${download_url}/mysql-connector-java-5.1.18.zip</value>
-    <description>MySQL connector download URL.</description>
-  </property>
-  <property>
-    <name>hive_aux_jars_path</name>
-    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
-    <description>Hive auxiliary jar path.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-  </property>
-
-  <!--HCAT-->
-
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/etc/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-exec-log4j.xml
deleted file mode 100644
index b0f5268..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-exec-log4j.xml
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hive.log.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.root.logger</name>
-    <value>INFO,FA</value>
-  </property>
-  <property>
-    <name>hive.log.dir</name>
-    <value>/tmp/${user.name}</value>
-  </property>
-  <property>
-    <name>hive.log.file</name>
-    <value>${hive.query.id}.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hive.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshold</name>
-    <value>${hive.log.threshold}</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA</name>
-    <value>org.apache.log4j.FileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.File</name>
-    <value>${hive.log.dir}/${hive.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
-  </property>
-  <property>
-    <name>log4j.category.DataNucleus</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore.Schema</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Datastore</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Plugin</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.MetaData</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Query</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.General</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Enhancer</name>
-    <value>ERROR,FA</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-log4j.xml
deleted file mode 100644
index e92c3e8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-log4j.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hive.log.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.root.logger</name>
-    <value>WARN,DRFA</value>
-  </property>
-  <property>
-    <name>hive.log.dir</name>
-    <value>/tmp/${user.name}</value>
-  </property>
-  <property>
-    <name>hive.log.file</name>
-    <value>hive.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hive.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshold</name>
-    <value>${hive.log.threshold}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hive.log.dir}/${hive.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.encoding</name>
-    <value>UTF-8</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
-  </property>
-  <property>
-    <name>log4j.category.DataNucleus</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore.Schema</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Datastore</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Plugin</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.MetaData</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Query</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.General</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Enhancer</name>
-    <value>ERROR,DRFA</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index e5b8bf4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,250 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-      thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-      string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting is ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-      The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
-      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
-      process runs as.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable HDFS filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable local filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
-      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
-      this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-      of buckets, a sort-merge join can be performed by setting this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Whether the join is automatically converted to a sort-merge join if the joined tables pass
-      the criteria for a sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) to a map-join SMB.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs (reduce sink operators) by moving the key/parts/reducer-num of the child RS to the parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single-reducer MR job.
-      The optimization is disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g. a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values in each key of the map-joined table should be cached in memory.
-    </description>
-  </property>
-
-</configuration>
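
A point worth illustrating from the hive.auto.convert.join.noconditionaltask.size description above: the threshold is compared against the sum of the n-1 smaller inputs of an n-way join, not against any single table. A minimal shell sketch with assumed table sizes:

  # hypothetical 3-way join: the two smaller inputs together must fit under the threshold
  small_tables_bytes=$((200000000 + 300000000))
  threshold=1000000000   # hive.auto.convert.join.noconditionaltask.size from the config above
  if [ $small_tables_bytes -lt $threshold ]; then
    echo "join converts directly to a mapjoin (no conditional task)"
  fi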

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml
deleted file mode 100644
index be1fe70..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,188 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HIVE</name>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <category>MASTER</category>
-          <!-- may be 0 if specifying external metastore, how to specify this? -->
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/HIVE_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MYSQL_SERVER</name>
-          <category>MASTER</category>
-          <!-- may be 0 if specifying external db, how to specify this? -->
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-client</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>This is a comment for the HCATALOG service</comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-
-    </service>
-
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100644
index 8d31b91..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-myhostname=$(hostname -f)
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-if [ "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -c "$mysqldbuser")" -eq 0 ]; then
-  echo "Adding user $mysqldbuser@$myhostname";
-  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
-  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
-fi
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop
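
For reference, addMysqlUser.sh takes four positional arguments: the mysqld service name, the database user to create, that user's password, and the database host. A hypothetical invocation (all values are examples):

  sh addMysqlUser.sh mysqld hive hivepassword db.example.com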

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100644
index 9e7b33f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac
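
The smoke script takes the table name as $1 and the action ('prepare' or 'cleanup') as $2, so a full cycle looks like this (table name is arbitrary):

  sh hcatSmoke.sh hcatsmoke prepare
  sh hcatSmoke.sh hcatsmoke cleanup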

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100644
index 7e03524..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100644
index 051a21e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi
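
In this script $1 is the HiveServer2 JDBC URL and $2 is a SQL file executed through beeline's !run command, presumably the hiveserver2.sql shipped alongside it. An example invocation (URL and path are assumptions):

  sh hiveserver2Smoke.sh jdbc:hive2://localhost:10000 /tmp/hiveserver2.sql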

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';
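
Despite the .sh extension this is a Pig Latin script, so it is run through pig rather than a shell, and it expects a 'passwd' file in the user's HDFS home directory. A hedged example setup and run:

  hadoop fs -copyFromLocal /etc/passwd passwd
  pig pigSmoke.sh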

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh
deleted file mode 100644
index fa90c2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
-echo $! > $3
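
The four positional arguments are the stdout log, stderr log, pid file, and Hive conf dir. Using the directories from the hive global configuration earlier in this change (paths are illustrative):

  sh startHiveserver2.sh /var/log/hive/hiveserver2.out /var/log/hive/hiveserver2.log /var/run/hive/hive-server.pid /etc/hive/conf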

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh
deleted file mode 100644
index 9350776..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
-echo $! > $3
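
Same argument convention as startHiveserver2.sh: stdout log, stderr log, pid file, conf dir. An illustrative invocation:

  sh startMetastore.sh /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf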

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py
deleted file mode 100644
index 2993d3a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hcat():
-  import params
-
-  Directory(params.hcat_conf_dir,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-  Directory(params.hcat_pid_dir,
-            owner=params.webhcat_user,
-            recursive=True
-  )
-
-  hcat_TemplateConfig('hcat-env.sh')
-
-
-def hcat_TemplateConfig(name):
-  import params
-
-  TemplateConfig(format("{hcat_conf_dir}/{name}"),
-                 owner=params.hcat_user,
-                 group=params.user_group
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py
deleted file mode 100644
index 54a8937..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hcat import hcat
-
-class HCatClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hcat()
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HCatClient().execute()


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
new file mode 100644
index 0000000..c013624
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import sys
+
+
+def webhcat():
+  import params
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.config_dir,
+            owner=params.webhcat_user,
+            group=params.user_group)
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+  )
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=Template('webhcat-env.sh.j2')
+  )
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  copyFromLocal(path='/usr/lib/hadoop-mapreduce/hadoop-streaming*.jar',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
+                kinit_if_needed=kinit_if_needed
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
+  )
+
+
+def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinit_if_needed=""):
+  import params
+
+  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
+  unless_cmd = format("{kinit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
+
+  ExecuteHadoop(copy_cmd,
+                not_if=unless_cmd,
+                user=owner,
+                conf_dir=params.hadoop_conf_dir)
+
+  # set ownership and permissions on the copied file, when requested
+  if owner:
+    if group:
+      chown = format('{owner}:{group}')
+    else:
+      chown = owner
+
+    chown_cmd = format("fs -chown {chown} {dest_dir}")
+
+    ExecuteHadoop(chown_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)
+
+  if mode:
+    chmod_cmd = format('fs -chmod {mode} {dest_dir}')
+
+    ExecuteHadoop(chmod_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)
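
Assuming ExecuteHadoop shells out as `hadoop --config <conf_dir> <command>`, the hadoop-streaming copy above reduces to roughly the following (the conf dir, target dir, user, and mode values are assumptions, not taken from this diff):

  hadoop --config /etc/hadoop/conf fs -copyFromLocal /usr/lib/hadoop-mapreduce/hadoop-streaming*.jar /apps/webhcat/hadoop-streaming.jar
  hadoop --config /etc/hadoop/conf fs -chown hcat /apps/webhcat/hadoop-streaming.jar
  hadoop --config /etc/hadoop/conf fs -chmod 755 /apps/webhcat/hadoop-streaming.jar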

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_server.py
new file mode 100644
index 0000000..4365111
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_server.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+
+class WebHCatServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    webhcat_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+
+if __name__ == "__main__":
+  WebHCatServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py
new file mode 100644
index 0000000..12c3854
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+def webhcat_service(action='start'):
+  import params
+
+  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
+
+  if action == 'start':
+    daemon_cmd = format('{cmd} start')
+    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.webhcat_user,
+            not_if=no_op_test
+    )
+  elif action == 'stop':
+    daemon_cmd = format('{cmd} stop')
+    Execute(daemon_cmd,
+            user=params.webhcat_user
+    )
+    Execute(format('rm -f {pid_file}'))
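
With webhcat-env.sh.j2 below exporting HADOOP_HOME=/usr/lib/hadoop, and assuming hadoop_home resolves to the same path, the start branch above effectively runs:

  env HADOOP_HOME=/usr/lib/hadoop /usr/lib/hcatalog/sbin/webhcat_server.sh start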

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/templates/webhcat-env.sh.j2
new file mode 100644
index 0000000..9ea4a79
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/templates/webhcat-env.sh.j2
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The file containing the running pid
+PID_FILE={{pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=/usr/lib/hadoop

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hdfs,bin,0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/global.xml
index edd1636..429c39f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/global.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/global.xml
@@ -61,4 +61,28 @@
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
   </property>
+
+  <!--MAPREDUCE2-->
+
+  <property>
+    <name>hs_host</name>
+    <value></value>
+    <description>History Server.</description>
+  </property>
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <value>/var/log/hadoop-mapreduce</value>
+    <description>Mapreduce Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <value>/var/run/hadoop-mapreduce</value>
+    <description>Mapreduce PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <value>mapred</value>
+    <description>Mapreduce User</description>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-queue-acls.xml
new file mode 100644
index 0000000..9389ed0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-queue-acls.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+  <property>
+    <name>mapred.queue.default.acl-submit-job</name>
+    <value>*</value>
+    <description> Comma-separated list of user and group names that are allowed
+      to submit jobs to the 'default' queue. The user list and the group list
+      are separated by a blank, e.g. alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to
+      submit jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.default.acl-administer-jobs</name>
+    <value>*</value>
+    <description> Comma-separated list of user and group names that are allowed
+      to delete jobs or modify the priority of jobs not owned by the current
+      user in the 'default' queue. The user list and the group list
+      are separated by a blank, e.g. alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to do
+      this operation.
+    </description>
+  </property>
+
+  <!-- END ACLs -->
+
+</configuration>
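
The user/group format described in the two descriptions above (a comma-separated
user list, a blank, then a comma-separated group list, or '*' for everyone) can
be illustrated with a small parser. A minimal sketch, not part of the stack
scripts; the helper name is made up for illustration:

    # Parse a queue ACL value such as '*' or 'alice,bob group1,group2'
    def parse_queue_acl(value):
      value = value.strip()
      if value == '*':
        return None, None  # None, None means everyone is allowed
      parts = value.split(None, 1)  # user list, then group list, blank-separated
      users = [u for u in parts[0].split(',') if u]
      groups = [g for g in parts[1].split(',') if g] if len(parts) > 1 else []
      return users, groups

    print parse_queue_acl('alice,bob group1,group2')  # (['alice', 'bob'], ['group1', 'group2'])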

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-site.xml
new file mode 100644
index 0000000..943f6bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/mapred-site.xml
@@ -0,0 +1,386 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>200</value>
+    <description>
+      The total amount of buffer memory to use while sorting files, in megabytes.
+      By default, gives each merge stream 1MB, which should minimize seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.sort.spill.percent</name>
+    <value>0.7</value>
+    <description>
+      The soft limit in the serialization buffer. Once reached, a thread will
+      begin to spill the contents to disk in the background. Note that
+      collection will not block if this threshold is exceeded while a spill
+      is already in progress, so spills may be larger than this threshold when
+      it is set to less than .5
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.task.io.sort.factor</name>
+    <value>100</value>
+    <description>
+      The number of streams to merge at once while sorting files.
+      This determines the number of open file handles.
+    </description>
+  </property>
+
+<!-- map/reduce properties -->
+  <property>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
+    <description>
+      Administrators for MapReduce applications.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>30</value>
+    <description>
+      The default number of parallel transfers run by reduce during
+      the copy(shuffle) phase.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some map tasks
+      may be executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some reduce tasks may be
+      executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.05</value>
+    <description>
+      Fraction of the number of maps in the job which should be complete before
+      reduces are scheduled for the job.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>
+      The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapreduce.reduce.shuffle.input.buffer.percent.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>
+      The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.output.compress.codec</name>
+    <value></value>
+    <description>If the map outputs are compressed, how should they be
+      compressed
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.type</name>
+    <value>BLOCK</value>
+    <description>
+      If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>
+      The percentage of memory, relative to the maximum heap size, to
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
+  </property>
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>false</value>
+    <description>
+      Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>300000</value>
+    <description>
+      The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Map task</description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Reduce task</description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.shuffle.port</name>
+    <value>13562</value>
+    <description>
+      Default port that the ShuffleHandler will run on.
+      ShuffleHandler is a service run at the NodeManager to facilitate
+      transfers of intermediate Map outputs to requesting Reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+    <description>
+      The runtime framework for executing MapReduce jobs. Can be one of local,
+      classic or yarn.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+    <description>
+      The staging dir used while submitting jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>512</value>
+    <description>The amount of memory the MR AppMaster needs.</description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.command-opts</name>
+    <value>-Xmx312m</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>
+      Java opts for the MR App Master processes for admin purposes.
+      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
+      thus its options can be overridden by the user.
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.log.level</name>
+    <value>INFO</value>
+    <description>MR App Master process log level.</description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.env</name>
+    <value></value>
+    <description>
+      User added environment variables for the MR App Master
+      processes. Example :
+      1) A=foo  This will set the env variable A to foo
+      2) B=$B:c This will inherit the tasktracker's B env variable.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for map tasks.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for reduce tasks.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It is an
+      application-specific setting. It should not be larger than the global number
+      set by the resourcemanager. Otherwise, it will be overridden. The default is
+      set to 2 to allow at least one retry for the AM.
+    </description>
+  </property>
+
+
+
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of maps.
+    </description>
+  </property>
+
+
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of reduces.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the map task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the reduce task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc)
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress</name>
+    <value>false</value>
+    <description>
+      Should the job outputs be compressed?
+    </description>
+  </property>
+
+</configuration>
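
The mapreduce.admin.user.env value above embeds a backtick shell expression that
probes `$JAVA_HOME/bin/java -d32 -version` and picks Linux-i386-32 or
Linux-amd64-64 for the native library path. A minimal Python sketch of the same
probe, assuming only that java_home points at a JDK (illustrative, not part of
the stack scripts):

    import subprocess

    def native_lib_subdir(java_home):
      # Exit code 0 from 'java -d32 -version' means the JVM supports 32-bit mode,
      # mirroring the backtick expression in mapreduce.admin.user.env above
      devnull = open('/dev/null', 'w')
      rc = subprocess.call([java_home + '/bin/java', '-d32', '-version'],
                           stdout=devnull, stderr=subprocess.STDOUT)
      devnull.close()
      return 'Linux-i386-32' if rc == 0 else 'Linux-amd64-64'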

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-log4j.xml
new file mode 100644
index 0000000..879cf03
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-log4j.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hadoop.mapreduce.jobsummary.logger</name>
+    <value>${hadoop.root.logger}</value>
+  </property>
+  <property>
+    <name>hadoop.mapreduce.jobsummary.log.file</name>
+    <value>hadoop-mapreduce.jobsummary.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.JSA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+
+  <!--Only for HDP2.0 below-->
+  <property>
+    <name>yarn.server.resourcemanager.appsummary.log.file</name>
+    <value>hadoop-mapreduce.jobsummary.log</value>
+  </property>
+  <property>
+    <name>yarn.server.resourcemanager.appsummary.logger</name>
+    <value>${hadoop.root.logger}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.File</name>
+    <value>/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.MaxFileSize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.MaxBackupIndex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RMSUMMARY.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary</name>
+    <value>${yarn.server.resourcemanager.appsummary.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary</name>
+    <value>false</value>
+  </property>
+  <!--Only for HDP2.0 above-->
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
index 3c4d24c..9bfd9ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
@@ -328,4 +328,15 @@
     </description>
   </property>
 
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metainfo.xml
index bc80f4a..a5a1e94 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metainfo.xml
@@ -15,28 +15,160 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-    <version>2.2.0.2.0.6.0</version>
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.1.1</version>
+      <components>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
         <component>
-            <name>RESOURCEMANAGER</name>
-            <category>MASTER</category>
+          <name>NODEMANAGER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-nodemanager</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-proxyserver</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-resourcemanager</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
         <component>
-            <name>NODEMANAGER</name>
-            <category>SLAVE</category>
+          <name>HISTORYSERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-       <component>
-            <name>YARN_CLIENT</name>
-            <category>CLIENT</category>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>core-site</config-type>
-      <config-type>yarn-site</config-type>
-      <config-type>capacity-scheduler</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce-historyserver</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metrics.json
index a60ab40..68efe9f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metrics.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/metrics.json
@@ -1370,6 +1370,26 @@
             "pointInTime": true,
             "temporal": false
           },
+          "metrics/jvm/HeapMemoryMax":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/HeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryMax":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
           "metrics/jvm/threadsRunnable": {
             "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
             "pointInTime": true,
@@ -1450,26 +1470,6 @@
             "pointInTime": true,
             "temporal": false
           },
-          "metrics/jvm/HeapMemoryMax":{
-            "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/HeapMemoryUsed":{
-            "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryMax":{
-            "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryUsed":{
-            "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
           "metrics/jvm/memNonHeapUsedM": {
             "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
             "pointInTime": true,
@@ -2282,6 +2282,26 @@
             "pointInTime": true,
             "temporal": false
           },
+          "metrics/jvm/HeapMemoryMax":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/HeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryMax":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
           "metrics/jvm/memNonHeapUsedM": {
             "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
             "pointInTime": true,
@@ -2307,26 +2327,6 @@
             "pointInTime": true,
             "temporal": false
           },
-          "metrics/jvm/HeapMemoryMax":{
-            "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/HeapMemoryUsed":{
-            "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryMax":{
-            "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryUsed":{
-            "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
           "metrics/jvm/threadsBlocked": {
             "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
             "pointInTime": true,
@@ -2531,4 +2531,4 @@
       }
     ]
   }
-}
\ No newline at end of file
+}
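
The HeapMemoryUsage/NonHeapMemoryUsage entries moved above resolve against the
java.lang:type=Memory MBean, which Hadoop daemons expose as JSON over their /jmx
servlet. A minimal sketch of reading the same attributes directly; the host and
port are example values:

    import json
    import urllib2

    # Query the ResourceManager's JMX servlet for the Memory MBean and print the
    # attributes the metric definitions above point at
    url = 'http://resourcemanager.example.com:8088/jmx?qry=java.lang:type=Memory'
    data = json.load(urllib2.urlopen(url))
    heap = data['beans'][0]['HeapMemoryUsage']
    print heap['max'], heap['used']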

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/files/validateYarnComponentStatus.py
new file mode 100644
index 0000000..dac198a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/files/validateYarnComponentStatus.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+# Return the response for a given path and address
+def getResponse(path, address, ssl_enabled):
+
+  command = "curl"
+  httpGssnegotiate = "--negotiate"
+  userpswd = "-u:"
+  insecure = "-k"# This is smoke test, no need to check CA of server
+  if ssl_enabled:
+    url = 'https://' + address + path
+  else:
+    url = 'http://' + address + path
+      
+  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
+  try:
+    proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    (stdout, stderr) = proc.communicate()
+    response = json.loads(stdout)
+    if response is None:
+      print 'There is no response for url: ' + str(url)
+      exit(1)
+    return response
+  except Exception as e:
+    print 'Error getting response for url:' + str(url), e
+    exit(1)
+
+# Verify that the REST API is available for the given component
+def validateAvailability(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAvailabilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking availability status of component', e
+    exit(1)
+
+# Validate the component-specific response
+def validateAvailabilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      rm_state = response['clusterInfo']['state']
+      if rm_state == STARTED_STATE:
+        return True
+      else:
+        print 'Resourcemanager is not started'
+        return False
+
+    elif component == NODEMANAGER:
+      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+      if node_healthy:
+        return True
+      else:
+        return False
+    elif component == HISTORYSERVER:
+      hs_start_time = response['historyInfo']['startedOn']
+      if hs_start_time > 0:
+        return True
+      else:
+        return False
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating availability response for ' + str(component), e
+    return False
+
+# Verify that the component has the resources required to work
+def validateAbility(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAbilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking ability of component', e
+    exit(1)
+
+# Validate the component-specific response to confirm it has the resources required to work
+def validateAbilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      nodes = []
+      if 'nodes' in response and response['nodes'] is not None and 'node' in response['nodes']:
+        nodes = response['nodes']['node']
+      connected_nodes_count = len(nodes)
+      if connected_nodes_count == 0:
+        print 'There are no nodemanagers connected to the resourcemanager'
+        return False
+      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+      active_nodes_count = len(active_nodes)
+
+      if active_nodes_count == 0:
+        print 'There are no active nodemanagers connected to the resourcemanager'
+        return False
+      else:
+        return True
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating ability response', e
+    return False
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+  (options, args) = parser.parse_args()
+
+  component = args[0]
+  
+  address = options.address
+  ssl_enabled = options.ssl_enabled == 'true'
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/info'
+  elif component == NODEMANAGER:
+    path = '/ws/v1/node/info'
+  elif component == HISTORYSERVER:
+    path = '/ws/v1/history/info'
+  else:
+    parser.error("Invalid component")
+
+  validateAvailability(component, path, address, ssl_enabled)
+
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/nodes'
+    validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+  main()
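
The availability checks above key off one field per component: clusterInfo.state
for the ResourceManager, nodeInfo.nodeHealthy for a NodeManager, and
historyInfo.startedOn for the HistoryServer. A minimal sketch of responses that
pass each check, using the functions and constants defined in this script (the
timestamp is an example value):

    print validateAvailabilityResponse(RESOURCEMANAGER, {'clusterInfo': {'state': 'STARTED'}})  # True
    print validateAvailabilityResponse(NODEMANAGER, {'nodeInfo': {'nodeHealthy': True}})        # True
    print validateAvailabilityResponse(HISTORYSERVER, {'historyInfo': {'startedOn': 1391200000}})  # True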

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py
new file mode 100644
index 0000000..9b6003c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/historyserver.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Historyserver(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('historyserver',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('historyserver',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.histroyserver_pid_file)
+
+if __name__ == "__main__":
+  Historyserver().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapred_service_check.py
new file mode 100644
index 0000000..3b789f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapred_service_check.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class MapReduce2ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
+    input_file = format("/user/{smokeuser}/mapredsmokeinput")
+    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
+
+    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
+    create_file_cmd = format("fs -put /etc/passwd {input_file}")
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+    ExecuteHadoop(cleanup_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(create_file_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir,
+                  logoutput=True
+    )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+if __name__ == "__main__":
+  MapReduce2ServiceCheck().execute()
\ No newline at end of file
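
The four ExecuteHadoop steps above expand to a cleanup, an upload, a wordcount
run, and an existence test: roughly the following hadoop CLI sequence, assuming
smokeuser is 'ambari-qa' (an example value; ExecuteHadoop also passes the conf
dir and runs as the smoke user):

    smokeuser = 'ambari-qa'
    input_file = '/user/%s/mapredsmokeinput' % smokeuser
    output_file = '/user/%s/mapredsmokeoutput' % smokeuser
    steps = [
      'hadoop fs -rm -r -f %s %s' % (output_file, input_file),
      'hadoop fs -put /etc/passwd %s' % input_file,
      'hadoop jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-2.*.jar '
      'wordcount %s %s' % (input_file, output_file),
      'hadoop fs -test -e %s' % output_file,
    ]
    for step in steps:
      print step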

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapreduce2_client.py
new file mode 100644
index 0000000..54119a7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapreduce2_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class MapReduce2Client(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  MapReduce2Client().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/nodemanager.py
new file mode 100644
index 0000000..dbeaca0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Nodemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('nodemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nodemanager_pid_file)
+
+if __name__ == "__main__":
+  Nodemanager().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
new file mode 100644
index 0000000..bdcf88e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+config_dir = "/etc/hadoop/conf"
+
+mapred_user = status_params.mapred_user
+yarn_user = status_params.yarn_user
+hdfs_user = config['configurations']['global']['hdfs_user']
+
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+rm_host = config['clusterHostInfo']['rm_host'][0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+
+java64_home = config['hostLevelParams']['java_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
+hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+yarn_heapsize = config['configurations']['global']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']
+
+yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
+mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
+mapred_log_dir_prefix = config['configurations']['global']['mapred_log_dir_prefix']
+
+rm_webui_address = format("{rm_host}:{rm_port}")
+rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+
+nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
+nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
+
+
+hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+yarn_pid_dir = status_params.yarn_pid_dir
+mapred_pid_dir = status_params.mapred_pid_dir
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+
+user_group = config['configurations']['global']['user_group']
+limits_conf_dir = "/etc/security/limits.d"
+hadoop_conf_dir = "/etc/hadoop/conf"
+yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+
+#exclude file
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
new file mode 100644
index 0000000..0540670
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+
+class Resourcemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('resourcemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.resourcemanager_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+
+    yarn_user = params.yarn_user
+    conf_dir = params.config_dir
+    user_group = params.user_group
+
+    yarn_refresh_cmd = format("/usr/bin/yarn --config {conf_dir} rmadmin -refreshNodes")
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         group=user_group
+    )
+
+    Execute(yarn_refresh_cmd,
+            user=yarn_user
+    )
+
+
+if __name__ == "__main__":
+  Resourcemanager().execute()

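status() above hands the ResourceManager pid file to check_process_status. A
rough sketch of that pid-file liveness idiom, in plain Python rather than the
actual resource_management helper:

import os

def is_process_up(pid_file):
    # read the recorded pid and probe it with signal 0; no signal is
    # delivered, and failure means the process is gone (or not ours)
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (IOError, OSError, ValueError):
        return False
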
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
new file mode 100644
index 0000000..441ef6c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(
+    name,
+    action='start'):
+
+  import params
+
+  if name == 'historyserver':
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{name}.pid")
+    usr = params.mapred_user
+  else:
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{name}.pid")
+    usr = params.yarn_user
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd} start {name}")
+    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            user=usr,
+            not_if=no_op
+    )
+
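+    # after initial_wait, re-check liveness: not_if skips this when the daemon pid is alive, otherwise the failing check aborts the start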
+    Execute(no_op,
+            user=usr,
+            not_if=no_op,
+            initial_wait=5
+    )
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {name}")
+    Execute(daemon_cmd,
+            user=usr,
+    )
+    rm_pid = format("rm -f {pid_file}")
+    Execute(rm_pid,
+            user=usr
+    )
\ No newline at end of file

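For reference, with stock paths (yarn_bin=/usr/lib/hadoop-yarn/sbin,
config_dir=/etc/hadoop/conf, hadoop_libexec_dir=/usr/lib/hadoop/libexec -- all
assumptions, since the real values come from params.py) the command strings
above expand roughly as follows:

pid_file = "/var/run/hadoop-yarn/yarn/yarn-yarn-nodemanager.pid"
cmd = ("export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && "
       "/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf")
daemon_cmd = "{0} start {1}".format(cmd, "nodemanager")
no_op = "ls {0} >/dev/null 2>&1 && ps `cat {0}` >/dev/null 2>&1".format(pid_file)
print(daemon_cmd)
print(no_op)
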
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
new file mode 100644
index 0000000..c53cc78
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    run_yarn_check_cmd = "/usr/bin/yarn node -list"
+
+    component_type = 'rm'
+    if params.hadoop_ssl_enabled:
+      component_address = params.rm_webui_https_address
+    else:
+      component_address = params.rm_webui_address
+
+    validateStatusFileName = "validateYarnComponentStatus.py"
+    validateStatusFilePath = format("/tmp/{validateStatusFileName}")
+
+    validateStatusCmd = format("{validateStatusFilePath} {component_type} -p {component_address} -s {hadoop_ssl_enabled}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
+    else:
+      smoke_cmd = validateStatusCmd
+
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName),
+         mode=0755
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            user=params.smokeuser,
+            logoutput=True
+    )
+
+    Execute(run_yarn_check_cmd,
+            user=params.smokeuser
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
\ No newline at end of file

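The tries/try_sleep pair above gives the smoke test three attempts, five
seconds apart. A minimal plain-subprocess sketch of that retry contract (not
the resource_management implementation):

import subprocess
import time

def execute_with_retries(cmd, tries=3, try_sleep=5):
    # rerun cmd until it exits 0 or the attempts are exhausted
    for attempt in range(1, tries + 1):
        if subprocess.call(cmd, shell=True) == 0:
            return
        if attempt < tries:
            time.sleep(try_sleep)
    raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))

execute_with_retries("true")
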
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py
new file mode 100644
index 0000000..e554513
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/status_params.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+mapred_user = config['configurations']['global']['mapred_user']
+yarn_user = config['configurations']['global']['yarn_user']
+yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
+yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
\ No newline at end of file

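With stock defaults (yarn_pid_dir_prefix=/var/run/hadoop-yarn, yarn_user=yarn;
both are assumptions, since the values come from the global config), the
format() calls above resolve like this:

yarn_pid_dir_prefix = "/var/run/hadoop-yarn"  # assumed default
yarn_user = "yarn"                            # assumed default
yarn_pid_dir = "{0}/{1}".format(yarn_pid_dir_prefix, yarn_user)
print("{0}/yarn-{1}-resourcemanager.pid".format(yarn_pid_dir, yarn_user))
# /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid
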
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
new file mode 100644
index 0000000..986356e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def yarn():
+  import params
+
+  Directory([params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory([params.mapred_pid_dir, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory([params.nm_local_dirs, params.nm_log_dirs, params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            recursive=True
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  File(params.yarn_job_summary_log,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+
+  File(params.mapred_job_summary_log,
+       owner=params.mapred_user,
+       group=params.user_group
+  )
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(format("{config_dir}/yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=Template('yarn-env.sh.j2')
+  )
+
+  if params.security_enabled:
+    container_executor = format("{yarn_container_bin}/container-executor")
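+    # 06050 = setuid + setgid + group r-x: container-executor can switch to
+    # the container user, but only the executor group may invoke it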
+    File(container_executor,
+         group=params.yarn_executor_container_group,
+         mode=06050
+    )
+    
+    File(format("{config_dir}/container-executor.cfg"),
+         group=params.user_group,
+         mode=0644,
+         content=Template('container-executor.cfg.j2')
+    )
+
+

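For reference, the secure-mode permission bits set on container-executor above
decompose as follows (a sketch only; the actual change is the File() call in
the diff):

import stat

# mode 06050: setuid (04000) + setgid (02000) + group read/execute (00050)
mode = stat.S_ISUID | stat.S_ISGID | stat.S_IRGRP | stat.S_IXGRP
print(oct(mode))  # prints 06050 on Python 2, 0o6050 on Python 3
# os.chmod("/usr/lib/hadoop-yarn/bin/container-executor", mode)  # root only
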
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn_client.py
new file mode 100644
index 0000000..7e9c564
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class YarnClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  YarnClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/container-executor.cfg.j2
new file mode 100644
index 0000000..29ad949
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/container-executor.cfg.j2
@@ -0,0 +1,22 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id=1000

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..4a4c698
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

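Rendering this template is straightforward; a sketch with stock jinja2 (Ambari
wraps templates in its own Template resource, so this is only an
approximation):

from jinja2 import Template

src = "{% for host in exclude_hosts %}\n{{host}}\n{% endfor %}"
print(Template(src, trim_blocks=True).render(
    exclude_hosts=["nm1.example.com", "nm2.example.com"]))
# nm1.example.com
# nm2.example.com
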
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/mapreduce.conf.j2
new file mode 100644
index 0000000..76caea4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/mapreduce.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}}   - nofile 32768
+{{mapred_user}}   - nproc  65536


[39/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index ff6d0ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    output_file = "/apps/hbase/data/ambarismoketest"
-    test_cmd = format("fs -test -e {output_file}")
-    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
-    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-  
-    File( '/tmp/hbaseSmokeVerify.sh',
-      content = StaticFile("hbaseSmokeVerify.sh"),
-      mode = 0755
-    )
-  
-    File( hbase_servicecheck_file,
-      mode = 0755,
-      content = Template('hbase-smoke.sh.j2')
-    )
-    
-    if params.security_enabled:    
-      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
-      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
-  
-      File( hbase_grant_premissions_file,
-        owner   = params.hbase_user,
-        group   = params.user_group,
-        mode    = 0644,
-        content = Template('hbase_grant_permissions.j2')
-      )
-      
-      Execute( grantprivelegecmd,
-        user = params.hbase_user,
-      )
-
-    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
-  
-    Execute( servicecheckcmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-  
-    Execute ( smokeverifycmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-    
-def main():
-  import sys
-  command_type = 'perform'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseServiceCheck().execute()
-  
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
deleted file mode 100644
index c9b20ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
deleted file mode 100644
index 1c75d15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
deleted file mode 100644
index e971e13..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8656

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
deleted file mode 100644
index 1c75d15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
deleted file mode 100644
index b8505b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
deleted file mode 100644
index 61fe62f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
-scan 'ambarismoketest'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100644
index 3b3bb18..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
deleted file mode 100644
index 9102d35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100644
index 9cf35d3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100644
index bd1d727..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
deleted file mode 100644
index b22ae5f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index d2bff0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,255 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directoires then teh edits is
-        replicated in all of the directoires for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-RULE:[2:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
-DEFAULT
-    </description>
-  </property>
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml
deleted file mode 100644
index 04d51db..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode.</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_include</name>
-    <value></value>
-    <description>HDFS Include hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>The user who is allowed to perform short-circuit block reads.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-
-    <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-    <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

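Every ACL above shares one value format: a comma-separated user list, a single blank, then a comma-separated group list, with "*" meaning all users. A minimal Python sketch of parsing that format (illustrative only, not Hadoop's ServiceAuthorizationManager):

def parse_acl(value):
  # "alice,bob users,wheel" -> ({"alice", "bob"}, {"users", "wheel"})
  value = value.strip()
  if value == "*":
    return None, None  # treat (None, None) as "all users allowed"
  parts = value.split(" ", 1)
  users = set(u for u in parts[0].split(",") if u)
  groups = set(g for g in parts[1].split(",") if g) if len(parts) > 1 else set()
  return users, groups

print(parse_acl("alice,bob users,wheel"))
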
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml
deleted file mode 100644
index 4fd0b22..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,305 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hadoop.root.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>.</value>
-  </property>
-  <property>
-    <name>hadoop.log.file</name>
-    <value>hadoop.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hadoop.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshhold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.taskid</name>
-    <value>null</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.iscleanup</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.noKeepSplits</name>
-    <value>4</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.totalLogFileSize</name>
-    <value>100</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.purgeLogSplits</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.logsRetainHours</name>
-    <value>12</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA</name>
-    <value>org.apache.hadoop.mapred.TaskLogAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.taskId</name>
-    <value>${hadoop.tasklog.taskid}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.isCleanup</name>
-    <value>${hadoop.tasklog.iscleanup}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.totalLogFileSize</name>
-    <value>${hadoop.tasklog.totalLogFileSize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.security.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.maxfilesize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.maxbackupindex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.category.SecurityLogger</name>
-    <value>${hadoop.security.logger}</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.file</name>
-    <value>SecurityAuth.audit</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.File</name>
-    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.File</name>
-    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxFileSize</name>
-    <value>${hadoop.security.log.maxfilesize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxBackupIndex</name>
-    <value>${hadoop.security.log.maxbackupindex}</value>
-  </property>
-  <property>
-    <name>hdfs.audit.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
-    <value>${hdfs.audit.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.File</name>
-    <value>${hadoop.log.dir}/hdfs-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>mapred.audit.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.mapred.AuditLogger</name>
-    <value>${mapred.audit.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.mapred.AuditLogger</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.File</name>
-    <value>${hadoop.log.dir}/mapred-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.File</name>
-    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxFileSize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxBackupIndex</name>
-    <value>10</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.metrics.log.level</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.metrics2</name>
-    <value>${hadoop.metrics.log.level}</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service</name>
-    <value>ERROR</value>
-  </property>
-  <property>
-    <name>log4j.appender.NullAppender</name>
-    <value>org.apache.log4j.varia.NullAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.log.metrics.EventCounter</value>
-  </property>
-
-</configuration>

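The hdfs-log4j config-type above stores each log4j key as a <name>/<value> pair; at deploy time Ambari flattens those pairs into a log4j.properties file on disk. A rough sketch of that flattening (property names taken from the file above, output path illustrative):

# A few of the pairs from the deleted file, as a dict.
props = {
  "log4j.rootLogger": "${hadoop.root.logger}, EventCounter",
  "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender",
  "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd",
}

# Write each pair as a "name=value" line.
with open("log4j.properties", "w") as f:
  for name in sorted(props):
    f.write("%s=%s\n" % (name, props[name]))
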
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 1fc6c59..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,476 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-      circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in term of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-    <description>
-      The datanode port for data transfer. This property is effective only if referenced from dfs.datanode.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
-    <description>
-      The datanode http port. This property is effective only if referenced from dfs.datanode.http.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>localhost:50070</value>
-    <description>The address and port on which the NameNode web UI listens.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Increased to grow the RPC queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be there on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL listing who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode would tolerate</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes exceeds this ratio,
-      stop avoiding writes to stale nodes so as to prevent hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-  </property>
-
-</configuration>

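Note how dfs.datanode.address above is defined as 0.0.0.0:${ambari.dfs.datanode.port}, relying on Hadoop's Configuration to expand ${...} references when the property is read. A rough Python sketch of that expansion (not the real implementation):

import re

def resolve(conf, value, depth=0):
  # Expand ${key} references, guarding against reference cycles.
  if depth > 20:
    raise ValueError("too many levels of substitution")
  m = re.search(r"\$\{([^}]+)\}", value)
  if m is None:
    return value
  expanded = value[:m.start()] + conf[m.group(1)] + value[m.end():]
  return resolve(conf, expanded, depth + 1)

conf = {"ambari.dfs.datanode.port": "50010",
        "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}"}
print(resolve(conf, conf["dfs.datanode.address"]))  # 0.0.0.0:50010
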
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
deleted file mode 100644
index d29d2fc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>1.2.0.1.3.3.0</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-libhdfs</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-pipes</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-sbin</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ambari-log4j</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

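This metainfo.xml is what tells Ambari which Python script backs each HDFS component. A short ElementTree sketch that lists the component-to-script mapping (the input path is illustrative):

import xml.etree.ElementTree as ET

root = ET.parse("metainfo.xml").getroot()
for comp in root.iter("component"):
  # e.g. "NAMENODE -> scripts/namenode.py", "DATANODE -> scripts/datanode.py"
  print("%s -> %s" % (comp.findtext("name"),
                      comp.findtext("commandScript/script")))
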
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

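From its argument handling, the script expects the HDFS user, the Hadoop conf dir, the formatted-mark dir, and then one or more NameNode name dirs; a plausible invocation (paths taken from the configs above) is:

sh checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted /hadoop/hdfs/namenode
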
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()

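Given the options it defines, checkWebUI.py takes a comma-separated host list and a port; a typical invocation (hostnames illustrative) is:

python checkWebUI.py -m host1.example.com,host2.example.com -p 50070

It exits non-zero as soon as any host fails to return HTTP 200 for "/".
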
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index eaa27cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def config(self, env):
-    import params
-
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()

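The DataNode class above subclasses resource_management's Script, whose execute() entry point dispatches the requested Ambari command (install, start, stop, status, ...) to the method of the same name. A stripped-down sketch of that dispatch (illustrative only, not the real resource_management code):

import sys

class Script(object):
  def execute(self):
    # e.g. an agent request for START ends up calling self.start(env)
    command = sys.argv[1].lower()
    getattr(self, command)(env=None)
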

[07/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py
deleted file mode 100644
index 83b8f08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def zookeeper_service(action='start'):
-  import params
-
-  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
-
-  if action == 'start':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
-    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.zk_user
-    )
-  elif action == 'stop':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
-    rm_pid = format("rm -f {zk_pid_file}")
-    Execute(daemon_cmd,
-            user=params.zk_user
-    )
-    Execute(rm_pid)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
deleted file mode 100644
index c003ba2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
deleted file mode 100644
index 5b68218..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The number of milliseconds of each tick
-tickTime={{tickTime}}
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit={{initLimit}}
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit={{syncLimit}}
-# the directory where the snapshot is stored.
-dataDir={{zk_data_dir}}
-# the port at which the clients will connect
-clientPort={{clientPort}}
-{% for host in zookeeper_hosts %}
-server.{{loop.index}}={{host}}:2888:3888
-{% endfor %}
-
-{% if security_enabled %}
-authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-jaasLoginRenew=3600000
-kerberos.removeHostFromPrincipal=true
-kerberos.removeRealmFromPrincipal=true
-{% endif %}
-
-{% if zoo_cfg_properties_map_length > 0 %}
-# Custom properties
-{% endif %}
-{% for key, value in zoo_cfg_properties_map.iteritems() %}
-{{key}}={{value}}
-{% endfor %}

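The zoo.cfg.j2 template above emits one server.N line per ZooKeeper host via loop.index. A small Jinja2 sketch of that rendering (host names illustrative):

from jinja2 import Template

template = Template(
  "{% for host in zookeeper_hosts %}"
  "server.{{loop.index}}={{host}}:2888:3888\n"
  "{% endfor %}")
print(template.render(zookeeper_hosts=["zk1.example.com", "zk2.example.com"]))
# server.1=zk1.example.com:2888:3888
# server.2=zk2.example.com:2888:3888
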
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
deleted file mode 100644
index 493a2a4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-export JAVA_HOME={{java64_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
deleted file mode 100644
index 696718e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
deleted file mode 100644
index aa123e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{zk_keytab_path}}"
-principal="{{zk_principal}}";
-};
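
For context, the stanza deleted above renders into a standard JAAS login entry. A small sketch of that rendering (jinja2 again, with a hypothetical keytab path and principal, purely for illustration):

    from jinja2 import Template

    jaas_src = '''Server {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    useTicketCache=false
    keyTab="{{zk_keytab_path}}"
    principal="{{zk_principal}}";
    };'''

    # hypothetical values, for illustration only
    print(Template(jaas_src).render(
        zk_keytab_path='/etc/security/keytabs/zk.service.keytab',
        zk_principal='zookeeper/_HOST@EXAMPLE.COM'))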

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_monitor.py b/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_monitor.py
new file mode 100644
index 0000000..a1ba41c
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_monitor.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from stacks.utils.RMFTestCase import *
+
+
+class TestGangliaMonitor(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py",
+                       classname="GangliaMonitor",
+                       command="configure",
+                       config_file="default.json"
+                      )
+    self.assertResourceCalled('Group', 'hadoop',
+                              )
+    self.assertResourceCalled('Group', 'nobody',
+                              )
+    self.assertResourceCalled('Group', 'nobody',
+                              )
+    self.assertResourceCalled('User', 'nobody',
+                              groups = [u'nobody'],
+                              )
+    self.assertResourceCalled('User', 'nobody',
+                              groups = [u'nobody'],
+                              )
+    self.assertResourceCalled('Directory', '/etc/ganglia/hdp',
+                              owner = 'root',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
+                              content = StaticFile('gmetad.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
+                              content = StaticFile('gmond.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
+                              content = StaticFile('checkGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
+                              content = StaticFile('checkRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
+                              content = StaticFile('gmetadLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
+                              content = StaticFile('gmondLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
+                              content = StaticFile('rrdcachedLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
+                              content = StaticFile('setupGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
+                              content = StaticFile('startGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
+                              content = StaticFile('startGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
+                              content = StaticFile('startRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
+                              content = StaticFile('stopGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
+                              content = StaticFile('stopGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
+                              content = StaticFile('stopRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
+                              content = StaticFile('teardownGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHistoryServer -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Directory', '/etc/ganglia/conf.d',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/conf.d/modgstatus.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/conf.d/multicpu.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/gmond.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py",
+                       classname="GangliaMonitor",
+                       command="start",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'chkconfig gmond off',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', 'service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/GANGLIA/package/scripts/ganglia_monitor.py",
+                       classname="GangliaMonitor",
+                       command="stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertNoMoreResources()
+
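
A note on the commands asserted in the start and stop tests above: grepping for [g]mond rather than gmond is the classic self-exclusion idiom. The bracket expression still matches the literal string gmond, but not the grep process's own command line, so the pipeline's exit status reflects only real gmond processes. A minimal reproduction of the check (assumes a POSIX shell and procps are available):

    import subprocess

    # exit status 0 only when a gmond process other than the grep itself exists
    rc = subprocess.call('/bin/ps auwx | /bin/grep [g]mond', shell=True)
    print('gmond running' if rc == 0 else 'gmond not running')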

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_server.py b/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_server.py
new file mode 100644
index 0000000..61f3735
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/GANGLIA/test_ganglia_server.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from stacks.utils.RMFTestCase import *
+
+
+class TestGangliaServer(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/GANGLIA/package/scripts/ganglia_server.py",
+                       classname="GangliaServer",
+                       command="configure",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Group', 'hadoop',
+                              )
+    self.assertResourceCalled('Group', 'nobody',
+                              )
+    self.assertResourceCalled('Group', 'nobody',
+                              )
+    self.assertResourceCalled('User', 'nobody',
+                              groups = [u'nobody'],
+                              )
+    self.assertResourceCalled('User', 'nobody',
+                              groups = [u'nobody'],
+                              )
+    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
+                              content = StaticFile('gmetad.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
+                              content = StaticFile('gmond.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
+                              content = StaticFile('checkGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
+                              content = StaticFile('checkRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
+                              content = StaticFile('gmetadLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
+                              content = StaticFile('gmondLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
+                              content = StaticFile('rrdcachedLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
+                              content = StaticFile('setupGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
+                              content = StaticFile('startGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
+                              content = StaticFile('startGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
+                              content = StaticFile('startRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
+                              content = StaticFile('stopGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
+                              content = StaticFile('stopGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
+                              content = StaticFile('stopRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
+                              content = StaticFile('teardownGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHistoryServer -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPDataNode -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPTaskTracker -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseRegionServer -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -t -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Directory', '/var/lib/ganglia/dwoo',
+                              owner = 'nobody',
+                              recursive = True,
+                              mode = 0777,
+                              )
+    self.assertResourceCalled('Directory', '/srv/www/cgi-bin',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/srv/www/cgi-bin/rrd.py',
+                              content = StaticFile('rrd.py'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/gmetad.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/GANGLIA/package/scripts/ganglia_server.py",
+                       classname="GangliaServer",
+                       command="start",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertResourceCalled('MonitorWebserver', 'restart',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/GANGLIA/package/scripts/ganglia_server.py",
+                       classname="GangliaServer",
+                       command="stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertResourceCalled('MonitorWebserver', 'restart',
+                              )
+    self.assertNoMoreResources()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_client.py
new file mode 100644
index 0000000..327e88f
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_client.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHBaseClient(RMFTestCase):
+  
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_client.py",
+                   classname = "HbaseClient",
+                   command = "configure",
+                   config_file="secured.json"
+    )
+    
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_client_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertNoMoreResources()
+    
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_client.py",
+                   classname = "HbaseClient",
+                   command = "configure",
+                   config_file="default.json"
+    )
+    
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )    
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )    
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )    
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )    
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )    
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )    
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertNoMoreResources()
+    
+
+    
+
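
The configurations arguments asserted above come straight out of the JSON command fixture named by config_file, which is why these tests can avoid hardcoding every property. A minimal sketch of that lookup (the fixture path is an assumption about the test-tree layout; RMFTestCase.getConfig() exposes the same parsed dictionary):

    import json

    with open('stacks/1.3.2/configs/default.json') as f:
        config = json.load(f)

    hbase_site = config['configurations']['hbase-site']
    hdfs_site = config['configurations']['hdfs-site']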

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_master.py
new file mode 100644
index 0000000..69db529
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_master.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHBaseMaster(RMFTestCase):
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "configure",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+    
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "start",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
+      user = 'hbase'
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "stop",
+                   config_file="default.json"
+    )
+    
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master && rm -f /var/run/hbase/hbase-hbase-master.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+
+  def test_decom_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                       classname = "HbaseMaster",
+                       command = "decommission",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host2',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "configure",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+    
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "start",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "stop",
+                   config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master && rm -f /var/run/hbase/hbase-hbase-master.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+
+  def test_decom_secure(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_master.py",
+                       classname = "HbaseMaster",
+                       command = "decommission",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )   
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )  
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-MASTER',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )   
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )    
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+  
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-MASTER',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_master_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
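
The not_if guard asserted in the start tests above is the daemons' idempotence check: skip the start command whenever the pid file exists and the pid it names is still alive. A minimal python sketch of the same semantics (a hypothetical helper, not Ambari's implementation):

    import os

    def daemon_is_running(pid_file):
        # mirrors the shell guard: ls <pid_file> && ps `cat <pid_file>`
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes for existence without killing
            return True
        except (IOError, ValueError, OSError):
            return False

    if not daemon_is_running('/var/run/hbase/hbase-hbase-master.pid'):
        pass  # ...this is where hbase-daemon.sh start master would run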

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_regionserver.py
new file mode 100644
index 0000000..a18c1fd
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HBASE/test_hbase_regionserver.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHbaseRegionServer(RMFTestCase):
+  
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "configure",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+    
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "start",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
+      user = 'hbase'
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "stop",
+                   config_file="default.json"
+    )
+    
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver && rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "configure",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+    
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "start",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HBASE/package/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "stop",
+                   config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver && rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )   
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )  
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )   
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )    
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+  
+  
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties      
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_regionserver_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hcat_client.py b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hcat_client.py
new file mode 100644
index 0000000..6088548
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hcat_client.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHcatClient(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hcat_client.py",
+                       classname = "HCatClient",
+                       command = "configure",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Directory', '/var/run/webhcat',
+      owner = 'hcat',
+      recursive = True,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hcatalog/conf/hcat-env.sh',
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertNoMoreResources()
+
+
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hcat_client.py",
+                         classname = "HCatClient",
+                         command = "configure",
+                         config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Directory', '/var/run/webhcat',
+      owner = 'hcat',
+      recursive = True,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hcatalog/conf/hcat-env.sh',
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertNoMoreResources()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_client.py b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_client.py
new file mode 100644
index 0000000..6355b9c
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_client.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHiveClient(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_client.py",
+                       classname = "HiveClient",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/etc/hive/conf',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 420,
+      conf_dir = '/etc/hive/conf',
+      configurations = self.getConfig()['configurations']['hive-site'],
+    )
+    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
+      not_if = '[ -f DBConnectionVerification.jar ]',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh',
+      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf"),
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertNoMoreResources()
+
+
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_client.py",
+                       classname = "HiveClient",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Directory', '/etc/hive/conf',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 420,
+      conf_dir = '/etc/hive/conf',
+      configurations = self.getConfig()['configurations']['hive-site'],
+    )
+    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
+      not_if = '[ -f DBConnectionVerification.jar ]',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh',
+      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf"),
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertNoMoreResources()
\ No newline at end of file
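
The curl command asserted in both tests above gives the jar fetch download-once semantics: pull DBConnectionVerification.jar from the ambari-server resources endpoint only when the not_if guard reports it absent. A rough plain-python equivalent (urllib2 to match the python 2 vintage of these tests; the target path is illustrative):

    import os
    import urllib2

    JAR = '/usr/lib/ambari-agent/DBConnectionVerification.jar'
    URL = ('http://c6401.ambari.apache.org:8080'
           '/resources/DBConnectionVerification.jar')

    if not os.path.isfile(JAR):
        with open(JAR, 'wb') as f:
            f.write(urllib2.urlopen(URL).read())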

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_metastore.py
new file mode 100644
index 0000000..59084ab
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_metastore.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHiveMetastore(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_metastore.py",
+                       classname = "HiveMetastore",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_metastore.py",
+                       classname = "HiveMetastore",
+                       command = "start",
+                       config_file="default.json"
+    )
+
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server',
+                              not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+                              user = 'hive'
+    )
+
+    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
+                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
+    )
+
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_metastore.py",
+                       classname = "HiveMetastore",
+                       command = "stop",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive.pid')
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_metastore.py",
+                       classname = "HiveMetastore",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_metastore.py",
+                       classname = "HiveMetastore",
+                       command = "start",
+                       config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server',
+                              not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+                              user = 'hive'
+    )
+
+    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
+                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
+    )
+
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_metastore.py",
+                       classname = "HiveMetastore",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive.pid')
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
+      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
+      path = ['/bin', '/usr/bin/'],
+      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
+    )
+    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 384,
+      conf_dir = '/etc/hive/conf.server',
+      configurations = self.getConfig()['configurations']['hive-site'],
+    )
+    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
+      not_if = '[ -f DBConnectionVerification.jar ]',
+    )
+    self.assertResourceCalled('File', '/tmp/start_metastore_script',
+      content = StaticFile('startMetastore.sh'),
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/lib/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
+      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
+      path = ['/bin', '/usr/bin/'],
+      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
+    )
+    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 384,
+      conf_dir = '/etc/hive/conf.server',
+      configurations = self.getConfig()['configurations']['hive-site'],
+    )
+    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
+      not_if = '[ -f DBConnectionVerification.jar ]',
+    )
+    self.assertResourceCalled('File', '/tmp/start_metastore_script',
+      content = StaticFile('startMetastore.sh'),
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/lib/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+

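The new-file tests above all follow the same RMFTestCase pattern: executeScript replays a single command against a mocked environment that records every resource the script creates, each assertResourceCalled pops the next recorded resource in order, and assertNoMoreResources checks that the queue is drained. A minimal standalone sketch of that assertion-queue idea (the class and attribute names here are illustrative assumptions, not the real RMFTestCase internals):

import unittest

class ResourceQueueMixin(object):
    # Mimics the pop-in-order verification style used by these tests.
    def assertResourceCalled(self, resource_type, name, **kwargs):
        self.assertTrue(self.resource_log, "no more recorded resources")
        self.assertEqual((resource_type, name, kwargs), self.resource_log.pop(0))

    def assertNoMoreResources(self):
        self.assertEqual([], self.resource_log)

class ExampleTest(ResourceQueueMixin, unittest.TestCase):
    def test_order_is_enforced(self):
        # In the real harness this log is filled by the mocked script run.
        self.resource_log = [
            ('Directory', '/etc/hive/conf.server',
             {'owner': 'hive', 'group': 'hadoop'}),
        ]
        self.assertResourceCalled('Directory', '/etc/hive/conf.server',
                                  owner='hive', group='hadoop')
        self.assertNoMoreResources()

if __name__ == '__main__':
    unittest.main()

Popping in order makes the tests sensitive to resource ordering as well as content, which is what the configure/start sequences in these files rely on.
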
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_server.py
new file mode 100644
index 0000000..b5d9a40
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_server.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHiveServer(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_server.py",
+                       classname = "HiveServer",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+  
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_server.py",
+                         classname = "HiveServer",
+                         command = "start",
+                         config_file="default.json"
+    )
+
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server',
+                              not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
+                              user = 'hive'
+    )
+
+    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
+                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
+    )
+
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_server.py",
+                       classname = "HiveServer",
+                       command = "stop",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive-server.pid')
+    self.assertNoMoreResources()
+
+    
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_server.py",
+                       classname = "HiveServer",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_server.py",
+                       classname = "HiveServer",
+                       command = "start",
+                       config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server',
+                              not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
+                              user = 'hive'
+    )
+
+    self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
+                              path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin']
+    )
+
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/hive_server.py",
+                       classname = "HiveServer",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive-server.pid')
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
+      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
+      path = ['/bin', '/usr/bin/'],
+      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
+    )
+    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 384,
+      conf_dir = '/etc/hive/conf.server',
+      configurations = self.getConfig()['configurations']['hive-site'],
+    )
+    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
+      not_if = '[ -f DBConnectionVerification.jar ]',
+    )
+    self.assertResourceCalled('File', '/tmp/start_hiveserver2_script',
+      content = StaticFile('startHiveserver2.sh'),
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/lib/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
+      creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
+      path = ['/bin', '/usr/bin/'],
+      not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
+    )
+    self.assertResourceCalled('Directory', '/etc/hive/conf.server',
+      owner = 'hive',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hive-site.xml',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 384,
+      conf_dir = '/etc/hive/conf.server',
+      configurations = self.getConfig()['configurations']['hive-site'],
+    )
+    self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
+      not_if = '[ -f DBConnectionVerification.jar ]',
+    )
+    self.assertResourceCalled('File', '/tmp/start_hiveserver2_script',
+      content = StaticFile('startHiveserver2.sh'),
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/lib/hive',
+      owner = 'hive',
+      group = 'hadoop',
+      mode = 493,
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
+      content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
+      owner = 'hive',
+      group = 'hadoop',
+    )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_service_check.py
new file mode 100644
index 0000000..3b19451
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_hive_service_check.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+import datetime
+import resource_management.libraries.functions
+@patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
+class TestServiceCheck(RMFTestCase):
+
+  def test_service_check_default(self):
+
+    self.executeScript("1.3.2/services/HIVE/package/scripts/service_check.py",
+                        classname="HiveServiceCheck",
+                        command="service_check",
+                        config_file="default.json"
+    )
+    self.assertResourceCalled('File', '/tmp/hiveserver2Smoke.sh',
+                        content = StaticFile('hiveserver2Smoke.sh'),
+                        mode = 493,
+    )
+    self.assertResourceCalled('File', '/tmp/hiveserver2.sql',
+                        content = StaticFile('hiveserver2.sql'),
+    )
+    self.assertResourceCalled('Execute', "env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hiveserver2Smoke.sh jdbc:hive2://[u'c6402.ambari.apache.org']:10000 /tmp/hiveserver2.sql",
+                        logoutput = True,
+                        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                        tries = 3,
+                        user = 'ambari-qa',
+                        try_sleep = 5,
+    )
+    self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
+                        content = StaticFile('hcatSmoke.sh'),
+                        mode = 493,
+    )
+    self.assertResourceCalled('Execute', 'sh /tmp/hcatSmoke.sh hcatsmoke prepare',
+                        logoutput = True,
+                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+                        tries = 3,
+                        user = 'ambari-qa',
+                        try_sleep = 5,
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
+                        logoutput = True,
+                        user = 'hdfs',
+                        conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('Execute', 'sh /tmp/hcatSmoke.sh hcatsmoke cleanup',
+                        logoutput = True,
+                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+                        tries = 3,
+                        user = 'ambari-qa',
+                        try_sleep = 5,
+    )
+    self.assertNoMoreResources()
+
+  def test_service_check_secured(self):
+
+    self.executeScript("1.3.2/services/HIVE/package/scripts/service_check.py",
+                        classname="HiveServiceCheck",
+                        command="service_check",
+                        config_file="secured.json"
+    )
+    self.assertResourceCalled('File', '/tmp/hiveserver2Smoke.sh',
+                        content = StaticFile('hiveserver2Smoke.sh'),
+                        mode = 493,
+    )
+    self.assertResourceCalled('File', '/tmp/hiveserver2.sql',
+                        content = StaticFile('hiveserver2.sql'),
+    )
+    self.assertResourceCalled('Execute', "/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hiveserver2Smoke.sh jdbc:hive2://[u'c6402.ambari.apache.org']:10000/\\;principal=/etc/security/keytabs/hive.service.keytab /tmp/hiveserver2.sql",
+                        logoutput = True,
+                        path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                        tries = 3,
+                        user = 'ambari-qa',
+                        try_sleep = 5,
+    )
+    self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
+                        content = StaticFile('hcatSmoke.sh'),
+                        mode = 493,
+    )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; sh /tmp/hcatSmoke.sh hcatsmoke prepare',
+                        logoutput = True,
+                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+                        tries = 3,
+                        user = 'ambari-qa',
+                        try_sleep = 5,
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
+                        logoutput = True,
+                        user = 'hdfs',
+                        conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; sh /tmp/hcatSmoke.sh hcatsmoke cleanup',
+                        logoutput = True,
+                        path = ['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+                        tries = 3,
+                        user = 'ambari-qa',
+                        try_sleep = 5,
+    )
+    self.assertNoMoreResources()


[18/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 39fe6e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py
deleted file mode 100644
index e6e7fb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import math
-import datetime
-
-from resource_management.core.shell import checked_call
-
-def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
-  """
-  @param heapsize_str: str (e.g '1000m')
-  @param xmn_percent: float (e.g 0.2)
-  @param xmn_max: integer (e.g 512)
-  """
-  heapsize = int(re.search('\d+',heapsize_str).group(0))
-  heapsize_unit = re.search('\D+',heapsize_str).group(0)
-  xmn_val = int(math.floor(heapsize*xmn_percent))
-  xmn_val -= xmn_val % 8
-  
-  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
-  return str(result_xmn_val) + heapsize_unit

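The docstring of the deleted calc_xmn_from_xms states the contract; a standalone copy of the same arithmetic (take xmn_percent of the heap, round down to a multiple of 8, cap at xmn_max, keep the unit suffix) makes it easy to sanity-check:

import math
import re

def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
    # Same logic as the removed helper above.
    heapsize = int(re.search(r'\d+', heapsize_str).group(0))
    heapsize_unit = re.search(r'\D+', heapsize_str).group(0)
    xmn_val = int(math.floor(heapsize * xmn_percent))
    xmn_val -= xmn_val % 8  # round down to a multiple of 8
    return str(min(xmn_val, xmn_max)) + heapsize_unit

assert calc_xmn_from_xms('1024m', 0.2, 512) == '200m'  # floor(204.8) -> 204 -> 200
assert calc_xmn_from_xms('4096m', 0.2, 512) == '512m'  # 816 capped at xmn_max
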
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py
deleted file mode 100644
index 95f3e30..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-def hbase(type=None # 'master' or 'regionserver' or 'client'
-              ):
-  import params
-  
-  Directory( params.conf_dir,
-      owner = params.hbase_user,
-      group = params.user_group,
-      recursive = True
-  )
-  
-  XmlConfig( "hbase-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hbase-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  XmlConfig( "hdfs-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hdfs-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-  
-  if 'hbase-policy' in params.config['configurations']:
-    XmlConfig( "hbase-policy.xml",
-      configurations = params.config['configurations']['hbase-policy'],
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  # Manually overriding ownership of file installed by hadoop package
-  else: 
-    File( format("{conf_dir}/hbase-policy.xml"),
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  
-  hbase_TemplateConfig( 'hbase-env.sh')     
-       
-  hbase_TemplateConfig( params.metric_prop_file_name,
-    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
-  )
-
-  hbase_TemplateConfig( 'regionservers')
-
-  if params.security_enabled:
-    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
-  
-  if type != "client":
-    Directory( params.pid_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-  
-    Directory ( [params.tmp_dir, params.log_dir],
-      owner = params.hbase_user,
-      recursive = True
-    )
-
-  if (params.log4j_props != None):
-    PropertiesFile('log4j.properties',
-      dir=params.conf_dir,
-      properties=params.log4j_props,
-      mode=0664,
-      owner=params.hbase_user,
-      group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.conf_dir}/log4j.properties"))):
-    File(format("{params.conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hbase_user
-    )
-
-def hbase_TemplateConfig(name, 
-                         tag=None
-                         ):
-  import params
-
-  TemplateConfig( format("{conf_dir}/{name}"),
-      owner = params.hbase_user,
-      template_tag = tag
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py
deleted file mode 100644
index 0f2a1bc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-
-         
-class HbaseClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    
-    hbase(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/'
-  stdoutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
-  
-  HbaseClient().execute()
-  
-if __name__ == "__main__":
-  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_decommission.py
deleted file mode 100644
index dba89fa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_decommission.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hbase_decommission(env):
-  import params
-
-  env.set_params(params)
-
-  if params.hbase_drain_only == True:
-    print "TBD: Remove host from draining"
-    pass
-
-  else:
-
-    kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};") if params.security_enabled else ""
-
-    hosts = params.hbase_excluded_hosts.split(",")
-    for host in hosts:
-      if host:
-        print "TBD: Add host to draining"
-        regionmover_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {conf_dir} org.jruby.Main {region_mover} unload {host}")
-
-        Execute(regionmover_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-      pass
-    pass
-  pass
-
-
-pass
\ No newline at end of file

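For context, the decommission logic deleted above reduces to one region_mover "unload" invocation per non-empty excluded host, optionally prefixed by a kinit. A rough sketch of that string building (paths taken from the params.py removed later in this message; the kinit prefix is left empty here):

def regionmover_cmds(excluded_hosts, kinit_cmd=''):
    # One "unload" run per non-empty host, mirroring hbase_decommission.
    hbase_cmd = '/usr/lib/hbase/bin/hbase'
    conf_dir = '/etc/hbase/conf'
    region_mover = '/usr/lib/hbase/bin/region_mover.rb'
    prefix = kinit_cmd + ' ' if kinit_cmd else ''
    return ['%s%s --config %s org.jruby.Main %s unload %s'
            % (prefix, hbase_cmd, conf_dir, region_mover, host)
            for host in excluded_hosts.split(',') if host]

print(regionmover_cmds('c6402.ambari.apache.org,c6403.ambari.apache.org'))
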
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py
deleted file mode 100644
index 9c78e5c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-from hbase_decommission import hbase_decommission
-
-         
-class HbaseMaster(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='master')
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'master',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'master',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
-    check_process_status(pid_file)
-
-  def decommission(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_decommission(env)
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
-  stroutputf = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
-  
-  HbaseMaster().execute()
-  
-if __name__ == "__main__":
-  HbaseMaster().execute()
-  #main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py
deleted file mode 100644
index 2d91e75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-         
-class HbaseRegionServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='regionserver')
-      
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'regionserver',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'regionserver',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
-    check_process_status(pid_file)
-    
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseRegionServer().execute()
-  
-if __name__ == "__main__":
-  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py
deleted file mode 100644
index 7a1248b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
-    import params
-  
-    role = name
-    cmd = format("{daemon_script} --config {conf_dir}")
-    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
-    
-    daemon_cmd = None
-    no_op_test = None
-    
-    if action == 'start':
-      daemon_cmd = format("{cmd} start {role}")
-      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    elif action == 'stop':
-      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
-
-    if daemon_cmd is not None:
-      Execute ( daemon_cmd,
-        not_if = no_op_test,
-        user = params.hbase_user
-      )

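The deleted hbase_service helper only builds two shell strings: a start command guarded by a pid-file liveness test, and a stop command that also removes the stale pid file. A quick sketch of those expansions, using the paths from the params.py below:

def hbase_service_cmds(role, action,
                       daemon_script='/usr/lib/hbase/bin/hbase-daemon.sh',
                       conf_dir='/etc/hbase/conf',
                       pid_dir='/var/run/hbase'):
    # Mirrors the removed helper's start/stop branches.
    cmd = '%s --config %s' % (daemon_script, conf_dir)
    pid_file = '%s/hbase-hbase-%s.pid' % (pid_dir, role)
    if action == 'start':
        guard = ('ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1'
                 % (pid_file, pid_file))
        return '%s start %s' % (cmd, role), guard
    if action == 'stop':
        return '%s stop %s && rm -f %s' % (cmd, role, pid_file), None
    return None, None

print(hbase_service_cmds('regionserver', 'start'))
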
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py
deleted file mode 100644
index 7db6306..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from functions import calc_xmn_from_xms
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-region_drainer = "/usr/lib/hbase/bin/region_drainer.rb"
-hbase_cmd = "/usr/lib/hbase/bin/hbase"
-hbase_excluded_hosts = default("/commandParams/excluded_hosts", "")
-hbase_drain_only = default("/commandParams/mark_draining_only", "")
-
-hbase_user = config['configurations']['global']['hbase_user']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-user_group = config['configurations']['global']['user_group']
-
-# this is "hadoop-metrics.properties" for 1.x stacks
-metric_prop_file_name = "hadoop-metrics2-hbase.properties"
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
-
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
-regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
-
-pid_dir = status_params.pid_dir
-tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
-service_check_data = get_unique_id_and_date()
-
-if security_enabled:
-  
-  _use_hostname_in_principal = default('instance_name', True)
-  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
-  _hostname = config['hostname']
-  _kerberos_domain = config['configurations']['global']['kerberos_domain']
-  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
-  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
-  
-  if _use_hostname_in_principal:
-    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
-  else:
-    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
-    
-master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
-regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#log4j.properties
-if ('hbase-log4j' in config['configurations']):
-  log4j_props = config['configurations']['hbase-log4j']
-else:
-  log4j_props = None

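The security branch in the deleted params.py only decides whether the hostname is baked into the JAAS principal. The two resulting shapes, with assumed placeholder values:

# All values below are illustrative placeholders.
primary = 'hbase'
hostname = 'c6401.ambari.apache.org'
kerberos_domain = 'EXAMPLE.COM'

with_host = '%s/%s@%s' % (primary, hostname, kerberos_domain)
without_host = '%s@%s' % (primary, kerberos_domain)
print(with_host)     # hbase/c6401.ambari.apache.org@EXAMPLE.COM
print(without_host)  # hbase@EXAMPLE.COM
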
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index ff6d0ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    output_file = "/apps/hbase/data/ambarismoketest"
-    test_cmd = format("fs -test -e {output_file}")
-    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
-    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-  
-    File( '/tmp/hbaseSmokeVerify.sh',
-      content = StaticFile("hbaseSmokeVerify.sh"),
-      mode = 0755
-    )
-  
-    File( hbase_servicecheck_file,
-      mode = 0755,
-      content = Template('hbase-smoke.sh.j2')
-    )
-    
-    if params.security_enabled:    
-      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
-      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
-  
-      File( hbase_grant_premissions_file,
-        owner   = params.hbase_user,
-        group   = params.user_group,
-        mode    = 0644,
-        content = Template('hbase_grant_permissions.j2')
-      )
-      
-      Execute( grantprivelegecmd,
-        user = params.hbase_user,
-      )
-
-    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
-  
-    Execute( servicecheckcmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-  
-    Execute ( smokeverifycmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-    
-def main():
-  import sys
-  command_type = 'perform'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseServiceCheck().execute()
-  
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py
deleted file mode 100644
index c9b20ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
deleted file mode 100644
index 2583f44..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8663

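The {{ganglia_server_host}} placeholder above is plain Jinja2, so the template can be exercised standalone when debugging metric routing. A quick sketch (jinja2 assumed available; the host name is illustrative):

from jinja2 import Template

with open('hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2') as f:
    template = Template(f.read())

# master-side contexts all point at the gmond collector on port 8663
print(template.render(ganglia_server_host='c6401.ambari.apache.org'))
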
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
deleted file mode 100644
index 9f2b616..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8660
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8660
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8660
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8660

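This RS variant is identical to the master template except for the gmond port (8660 here, 8663 for the master). The duplication could in principle collapse into one template plus a per-role parameter; a hypothetical sketch, not how this patch organizes it:

GANGLIA_PORT_BY_ROLE = {
    'HBASE_MASTER': 8663,
    'HBASE_REGIONSERVER': 8660,
}

def metrics_template_params(role, ganglia_server_host):
    # would feed a single shared .j2 using {{ganglia_port}} instead of a literal port
    return {
        'ganglia_server_host': ganglia_server_host,
        'ganglia_port': GANGLIA_PORT_BY_ROLE[role],
    }
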
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2
deleted file mode 100644
index b8505b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage its own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}

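In the python stacks an env template like this is materialized by a File resource whose content is a Template, resolved against the params module ({{java64_home}}, {{log_dir}}, {{master_heapsize}} and friends). A sketch of the rendering call; the exact attribute names on params are assumptions here:

import params
from resource_management import File, Template

File(params.conf_dir + '/hbase-env.sh',
     owner=params.hbase_user,          # assumed params attribute
     content=Template('hbase-env.sh.j2'))
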
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2
deleted file mode 100644
index 61fe62f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
-scan 'ambarismoketest'
-exit
\ No newline at end of file

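The smoke script is deliberately idempotent: it disables and drops any leftover ambarismoketest table, recreates it, writes one cell whose value carries {{service_check_data}}, and scans it back. A hedged sketch of how a service check might drive it through the HBase shell (the paths, retry counts and params name are assumptions):

import params
from resource_management import *

smoke_file = '/tmp/hbase-smoke.sh'
File(smoke_file, content=Template('hbase-smoke.sh.j2'))
Execute('hbase --config /etc/hbase/conf shell ' + smoke_file,
        user=params.smoke_test_user,  # assumed params attribute
        tries=3, try_sleep=5,
        logoutput=True)
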
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100644
index 696718e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2
deleted file mode 100644
index 9102d35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
\ No newline at end of file

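On secured clusters the grant above has to run before the smoke script, and it has to run as the hbase service user, since only that principal may grant '{{smokeuser_permissions}}' to the smoke user. A sketch of that step; the kinit invocation and keytab path are assumptions:

from resource_management import *

grant_file = '/tmp/hbase_grant_permissions.sh'
File(grant_file, content=Template('hbase_grant_permissions.j2'))
Execute('kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; '
        'hbase shell ' + grant_file,
        user='hbase',   # service user, assumed
        logoutput=True)
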
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100644
index 722cfcc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100644
index cb9b7b0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};

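The three JAAS templates in this package differ only in credential source: the client entry trusts the caller's ticket cache, while the master and regionserver entries log in non-interactively from keytabs (useKeyTab=true, storeKey=true, useTicketCache=false). They can be rendered in one pass; a sketch, with the params attribute names assumed:

import params
from resource_management import File, Template

for name in ('hbase_client_jaas.conf',
             'hbase_master_jaas.conf',
             'hbase_regionserver_jaas.conf'):
    # keyTab/principal placeholders resolve from params for the daemon variants
    File(params.conf_dir + '/' + name,
         owner=params.hbase_user,
         content=Template(name + '.j2'))
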
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2
deleted file mode 100644
index b22ae5f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 356cba4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,169 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
-    </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-  So the default rule is just "DEFAULT", which takes all principals in your default domain to their first component;
-  e.g. "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both map to "omalley" if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-</configuration>

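A worked instance of the auth_to_local machinery above, as a standalone sketch: for RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ the base builds "<first component>@<realm>" from a two-component principal, the parenthesized regex filters it, and the sed-style substitution rewrites it:

import re

principal = 'rm/c6401.ambari.apache.org@EXAMPLE.COM'
first = principal.split('/')[0]          # $1 = 'rm'
realm = principal.rsplit('@', 1)[1]      # $0 = 'EXAMPLE.COM'
candidate = first + '@' + realm          # base [2:$1@$0] -> 'rm@EXAMPLE.COM'

if re.match(r'[rn]m@.*', candidate):                  # filter ([rn]m@.*)
    print(re.sub(r'.*', 'yarn', candidate, count=1))  # s/.*/yarn/ -> 'yarn'
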
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml
deleted file mode 100644
index ffda6e2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1073741824</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>User allowed local block path access (used by HBase for short-circuit reads).</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-  
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>KeyTab Directory.</description>
-  </property>
-    <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-    <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>HDFS service user.</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  
-</configuration>

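These global properties are what the stack scripts read back through Script.get_config(); each params.py is essentially a typed view over this dict, exactly as the deleted status_params.py did for hbase_pid_dir. A minimal sketch against the names defined above:

from resource_management import *

config = Script.get_config()
global_conf = config['configurations']['global']

hdfs_user = global_conf['hdfs_user']                          # 'hdfs'
hadoop_pid_dir_prefix = global_conf['hadoop_pid_dir_prefix']  # '/var/run/hadoop'
security_enabled = global_conf['security_enabled']
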
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 51b01bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

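Every ACL value above shares one grammar: a comma-separated user list, a single blank, then a comma-separated group list, with '*' meaning everyone. A small parser sketch for that format:

def parse_acl(value):
    '''Parse 'alice,bob users,wheel' into (users, groups); '*' allows all.'''
    value = value.strip()
    if value == '*':
        return (['*'], ['*'])
    users_part, _, groups_part = value.partition(' ')
    users = [u for u in users_part.split(',') if u]
    groups = [g for g in groups_part.split(',') if g]
    return (users, groups)

assert parse_acl('*') == (['*'], ['*'])
assert parse_acl('alice,bob users,wheel') == (['alice', 'bob'], ['users', 'wheel'])
assert parse_acl('hadoop') == (['hadoop'], [])
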
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-log4j.xml
deleted file mode 100644
index 4fd0b22..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,305 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hadoop.root.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>.</value>
-  </property>
-  <property>
-    <name>hadoop.log.file</name>
-    <value>hadoop.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hadoop.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshhold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.taskid</name>
-    <value>null</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.iscleanup</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.noKeepSplits</name>
-    <value>4</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.totalLogFileSize</name>
-    <value>100</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.purgeLogSplits</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.logsRetainHours</name>
-    <value>12</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA</name>
-    <value>org.apache.hadoop.mapred.TaskLogAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.taskId</name>
-    <value>${hadoop.tasklog.taskid}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.isCleanup</name>
-    <value>${hadoop.tasklog.iscleanup}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.totalLogFileSize</name>
-    <value>${hadoop.tasklog.totalLogFileSize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.security.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.maxfilesize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.maxbackupindex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.category.SecurityLogger</name>
-    <value>${hadoop.security.logger}</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.file</name>
-    <value>SecurityAuth.audit</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.File</name>
-    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.File</name>
-    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxFileSize</name>
-    <value>${hadoop.security.log.maxfilesize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxBackupIndex</name>
-    <value>${hadoop.security.log.maxbackupindex}</value>
-  </property>
-  <property>
-    <name>hdfs.audit.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
-    <value>${hdfs.audit.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.File</name>
-    <value>${hadoop.log.dir}/hdfs-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>mapred.audit.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.mapred.AuditLogger</name>
-    <value>${mapred.audit.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.mapred.AuditLogger</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.File</name>
-    <value>${hadoop.log.dir}/mapred-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.File</name>
-    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxFileSize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxBackupIndex</name>
-    <value>10</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.metrics.log.level</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.metrics2</name>
-    <value>${hadoop.metrics.log.level}</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service</name>
-    <value>ERROR</value>
-  </property>
-  <property>
-    <name>log4j.appender.NullAppender</name>
-    <value>org.apache.log4j.varia.NullAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.log.metrics.EventCounter</value>
-  </property>
-
-</configuration>

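Keeping the log4j settings as <name>/<value> pairs means the agent can regenerate a flat log4j.properties by simple concatenation. A sketch of that flattening, assuming the hdfs-log4j entries arrive in the same configurations dict as every other config type:

from resource_management import *

config = Script.get_config()
log4j_props = config['configurations']['hdfs-log4j']

content = ''.join('{0}={1}\n'.format(name, value)
                  for name, value in sorted(log4j_props.items()))

File('/etc/hadoop/conf/log4j.properties',
     owner='hdfs',   # illustrative; the real owner would come from params
     content=content)
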

[06/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_mysql_server.py b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_mysql_server.py
new file mode 100644
index 0000000..d424a12
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HIVE/test_mysql_server.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestMySqlServer(RMFTestCase):
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/mysql_server.py",
+                       classname = "MysqlServer",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/mysql_server.py",
+                       classname = "MysqlServer",
+                       command = "start",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'service mysql start',
+                       logoutput = True,
+                       path = ['/usr/local/bin/:/bin/:/sbin/'],
+                       tries = 1,
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/mysql_server.py",
+                       classname = "MysqlServer",
+                       command = "stop",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'service mysql stop',
+                              logoutput = True,
+                              path = ['/usr/local/bin/:/bin/:/sbin/'],
+                              tries = 1,
+    )
+    self.assertNoMoreResources()
+
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/mysql_server.py",
+                       classname = "MysqlServer",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/mysql_server.py",
+                       classname = "MysqlServer",
+                       command = "start",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'service mysql start',
+                              logoutput = True,
+                              path = ['/usr/local/bin/:/bin/:/sbin/'],
+                              tries = 1,
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HIVE/package/scripts/mysql_server.py",
+                       classname = "MysqlServer",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'service mysql stop',
+                              logoutput = True,
+                              path = ['/usr/local/bin/:/bin/:/sbin/'],
+                              tries = 1,
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Execute', 'service mysql start',
+      logoutput = True,
+      path = ['/usr/local/bin/:/bin/:/sbin/'],
+      tries = 1,
+    )
+    self.assertResourceCalled('File', '/tmp/addMysqlUser.sh',
+      content = StaticFile('addMysqlUser.sh'),
+      mode = 493,
+    )
+    self.assertResourceCalled('Execute', ('bash', '-x', '/tmp/addMysqlUser.sh', 'mysql', u'hive', 'asd', u'c6402.ambari.apache.org'),
+      logoutput = True,
+      path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      tries = 3,
+      try_sleep = 5,
+    )
+    self.assertResourceCalled('Execute', 'service mysql stop',
+      logoutput = True,
+      path = ['/usr/local/bin/:/bin/:/sbin/'],
+      tries = 1,
+    )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Execute', 'service mysql start',
+      logoutput = True,
+      path = ['/usr/local/bin/:/bin/:/sbin/'],
+      tries = 1,
+    )
+    self.assertResourceCalled('File', '/tmp/addMysqlUser.sh',
+      content = StaticFile('addMysqlUser.sh'),
+      mode = 493,
+    )
+    self.assertResourceCalled('Execute', ('bash', '-x', '/tmp/addMysqlUser.sh', 'mysql', u'hive', 'asd', u'c6402.ambari.apache.org'),
+      logoutput = True,
+      path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      tries = 3,
+      try_sleep = 5,
+    )
+    self.assertResourceCalled('Execute', 'service mysql stop',
+      logoutput = True,
+      path = ['/usr/local/bin/:/bin/:/sbin/'],
+      tries = 1,
+    )

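One detail worth flagging in these expectations: mode = 493 is the decimal spelling of octal 0755 (rwxr-xr-x); RMFTestCase records keyword arguments exactly as the resource received them, so the octal literal from the scripts shows up here as its decimal value. A quick check:

import stat

assert 493 == 0o755              # decimal 493 is octal 755
print(oct(493))                  # '0755' on Python 2, '0o755' on Python 3
print(bool(493 & stat.S_IXUSR))  # owner execute bit is set -> True
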
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_client.py b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_client.py
new file mode 100644
index 0000000..fdfbf6e
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_client.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestMapreduceClient(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/client.py",
+                       classname = "Client",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/client.py",
+      classname = "Client",
+      command = "configure",
+      config_file="secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+
+    self.assertNoMoreResources()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_historyserver.py b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_historyserver.py
new file mode 100644
index 0000000..df58f13
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_historyserver.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHistoryServer(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/historyserver.py",
+                       classname = "Historyserver",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/historyserver.py",
+                         classname = "Historyserver",
+                         command = "start",
+                         config_file="default.json"
+      )
+
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver',
+                              user = 'mapred',
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
+    )
+    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1',
+                              user = 'mapred',
+                              initial_wait = 5,
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/historyserver.py",
+                       classname = "Historyserver",
+                       command = "stop",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop historyserver',
+                              user = 'mapred'
+    )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid')
+    self.assertNoMoreResources()
+
+
+  def test_configure_secured(self):
+
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/historyserver.py",
+                       classname = "Historyserver",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/historyserver.py",
+                         classname = "Historyserver",
+                         command = "start",
+                         config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver',
+                              user = 'mapred',
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
+    )
+    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1',
+                              user = 'mapred',
+                              initial_wait = 5,
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid` >/dev/null 2>&1'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/historyserver.py",
+                       classname = "Historyserver",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop historyserver',
+                              user = 'mapred'
+    )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-historyserver.pid')
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
\ No newline at end of file
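
The start/stop tests above all encode the same hadoop-daemon.sh idiom: the start Execute is guarded by a not_if that passes only when the pid file exists and the pid it names is a live process, and a second Execute re-runs that same liveness check with initial_wait = 5, mirroring the "wait, then verify it actually came up" behavior; stop removes the pid file afterwards. The guard itself is plain shell. A small sketch of how one might compose it in Python (the helper name is illustrative, not Ambari code):

    def pid_liveness_guard(pid_file):
        # Shell expression that succeeds only if pid_file exists AND
        # the pid stored in it belongs to a running process.
        return ("ls {0} >/dev/null 2>&1 && "
                "ps `cat {0}` >/dev/null 2>&1").format(pid_file)

    print(pid_liveness_guard("/var/run/hadoop/mapred/hadoop-mapred-historyserver.pid"))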

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_jobtracker.py b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_jobtracker.py
new file mode 100644
index 0000000..4c442ad
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_jobtracker.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestJobtracker(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "start",
+                       config_file="default.json"
+    )
+
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker',
+                       user = 'mapred',
+                       not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
+    )
+    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1',
+                       user = 'mapred',
+                       initial_wait = 5,
+                       not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "stop",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop jobtracker',
+                              user = 'mapred'
+    )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid')
+    self.assertNoMoreResources()
+
+  def test_decommission_default(self):
+
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "decommission",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+                       owner = 'mapred',
+                       content = Template('exclude_hosts_list.j2'),
+                       group = 'hadoop',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'mradmin -refreshNodes',
+                       conf_dir = '/etc/hadoop/conf',
+                       kinit_override = True,
+                       user = 'mapred',
+    )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "start",
+                       config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker',
+                       user = 'mapred',
+                       not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
+    )
+    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1',
+                       user = 'mapred',
+                       initial_wait = 5,
+                       not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid` >/dev/null 2>&1'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop jobtracker',
+                       user = 'mapred'
+    )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid')
+    self.assertNoMoreResources()
+
+  def test_decommission_secured(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/jobtracker.py",
+                       classname = "Jobtracker",
+                       command = "decommission",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+                       owner = 'mapred',
+                       content = Template('exclude_hosts_list.j2'),
+                       group = 'hadoop',
+    )
+
+    self.assertResourceCalled('ExecuteHadoop', 'mradmin -refreshNodes',
+                       conf_dir = '/etc/hadoop/conf',
+                       kinit_override = True,
+                       user = 'mapred',
+    )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
\ No newline at end of file
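
The decommission tests capture a two-step flow: rewrite /etc/hadoop/conf/mapred.exclude from the exclude_hosts_list.j2 template, then have the JobTracker re-read its include/exclude lists with mradmin -refreshNodes (kinit_override = True skips the usual kinit wrapper). Done by hand, the equivalent is roughly the following sketch; the excluded_hosts value is an assumed input:

    import subprocess

    excluded_hosts = ["host1.example.com"]  # assumption: hosts to drain

    # Step 1: regenerate the exclude file the JobTracker consults.
    with open("/etc/hadoop/conf/mapred.exclude", "w") as f:
        f.write("\n".join(excluded_hosts) + "\n")

    # Step 2: ask the JobTracker to re-read include/exclude lists,
    # as the mapred user, against the cluster config directory.
    subprocess.check_call(
        ["su", "-", "mapred", "-c",
         "hadoop --config /etc/hadoop/conf mradmin -refreshNodes"])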

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_service_check.py
new file mode 100644
index 0000000..7391a04
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_service_check.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestServiceCheck(RMFTestCase):
+
+  def test_service_check_default(self):
+
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/service_check.py",
+                        classname="ServiceCheck",
+                        command="service_check",
+                        config_file="default.json"
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput',
+                        try_sleep = 5,
+                        tries = 1,
+                        user = 'ambari-qa',
+                        conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'jar /usr/lib/hadoop//hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput',
+                        logoutput = True,
+                        try_sleep = 5,
+                        tries = 1,
+                        user = 'ambari-qa',
+                        conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e mapredsmokeoutput',
+                        user = 'ambari-qa',
+                        conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertNoMoreResources()
+
+  def test_service_check_secured(self):
+
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/service_check.py",
+                       classname="ServiceCheck",
+                       command="service_check",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;',
+                       user = 'ambari-qa',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput',
+                       try_sleep = 5,
+                       tries = 1,
+                       user = 'ambari-qa',
+                       conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'jar /usr/lib/hadoop//hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput',
+                       logoutput = True,
+                       try_sleep = 5,
+                       tries = 1,
+                       user = 'ambari-qa',
+                       conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -test -e mapredsmokeoutput',
+                       user = 'ambari-qa',
+                       conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertNoMoreResources()
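
The smoke test asserted above is a three-step script, each step expanded by ExecuteHadoop into a "hadoop --config <conf_dir> ..." invocation run as ambari-qa: seed HDFS with /etc/passwd as mapredsmokeinput, run the bundled wordcount example over it, then verify that mapredsmokeoutput exists. Reconstructed as a standalone sketch (assuming ExecuteHadoop expands roughly as described, and ignoring the tries/try_sleep retry logic):

    import subprocess

    conf = "/etc/hadoop/conf"
    steps = [
        # clean any previous run, then stage the input file
        "hadoop --config %s dfs -rmr mapredsmokeoutput mapredsmokeinput ; "
        "hadoop dfs -put /etc/passwd mapredsmokeinput" % conf,
        # run the stock wordcount example job
        "hadoop --config %s jar /usr/lib/hadoop//hadoop-examples.jar "
        "wordcount mapredsmokeinput mapredsmokeoutput" % conf,
        # exits non-zero if the output directory never appeared
        "hadoop --config %s fs -test -e mapredsmokeoutput" % conf,
    ]
    for cmd in steps:
        subprocess.check_call(["su", "-", "ambari-qa", "-c", cmd])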

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_tasktracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_tasktracker.py b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_tasktracker.py
new file mode 100644
index 0000000..d12f09b
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/MAPREDUCE/test_mapreduce_tasktracker.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestTasktracker(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py",
+                       classname = "Tasktracker",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py",
+                         classname = "Tasktracker",
+                         command = "start",
+                         config_file="default.json"
+      )
+
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker',
+                              user = 'mapred',
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
+    )
+    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1',
+                              user = 'mapred',
+                              initial_wait = 5,
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py",
+                       classname = "Tasktracker",
+                       command = "stop",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop tasktracker',
+                              user = 'mapred'
+    )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid')
+    self.assertNoMoreResources()
+
+
+  def test_configure_secured(self):
+
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py",
+                       classname = "Tasktracker",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py",
+                         classname = "Tasktracker",
+                         command = "start",
+                         config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker',
+                              user = 'mapred',
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
+    )
+    self.assertResourceCalled('Execute', 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1',
+                              user = 'mapred',
+                              initial_wait = 5,
+                              not_if = 'ls /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid` >/dev/null 2>&1'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/MAPREDUCE/package/scripts/tasktracker.py",
+                       classname = "Tasktracker",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop tasktracker',
+                              user = 'mapred'
+    )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid')
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/run/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/mapred',
+      owner = 'mapred',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/mapred',
+      owner = 'mapred',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.exclude',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred.include',
+      owner = 'mapred',
+      group = 'hadoop',
+    )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_service_check.py
new file mode 100644
index 0000000..93a14c3
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_service_check.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestSqoopServiceCheck(RMFTestCase):
+
+  def test_service_check_secured(self):
+    self.executeScript("1.3.2/services/SQOOP/package/scripts/service_check.py",
+                       classname = "SqoopServiceCheck",
+                       command = "service_check",
+                       config_file="secured.json")
+    self.assertResourceCalled('Execute', '/usr/bin/kinit  -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa',)
+    self.assertResourceCalled('Execute', 'sqoop version',
+                              logoutput = True,
+                              user = 'ambari-qa',)
+    self.assertNoMoreResources()
+
+  def test_service_check_default(self):
+    self.executeScript("1.3.2/services/SQOOP/package/scripts/service_check.py",
+                         classname = "SqoopServiceCheck",
+                         command = "service_check",
+                         config_file="default.json")
+    self.assertResourceCalled('Execute', 'sqoop version',
+                              logoutput = True,
+                              user = 'ambari-qa',)
+    self.assertNoMoreResources()
+
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_sqoop.py b/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_sqoop.py
new file mode 100644
index 0000000..c052280
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/SQOOP/test_sqoop.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestSqoop(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/SQOOP/package/scripts/sqoop_client.py",
+                       classname = "SqoopClient",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
+                              to = '/usr/share/java/mysql-connector-java.jar',)
+    self.assertResourceCalled('Directory', '/usr/lib/sqoop/conf',
+                              owner = 'sqoop',
+                              group = 'hadoop',)
+    self.assertResourceCalled('TemplateConfig', '/usr/lib/sqoop/conf/sqoop-env.sh',
+                              owner = 'sqoop',
+                              template_tag = None,)
+    self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env-template.sh',
+                              owner = 'sqoop',
+                              group = 'hadoop',)
+    self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site-template.xml',
+                              owner = 'sqoop',
+                              group = 'hadoop',)
+    self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site.xml',
+                              owner = 'sqoop',
+                              group = 'hadoop',)
+    self.assertNoMoreResources()
+
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_server.py b/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_server.py
new file mode 100644
index 0000000..7690954
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_server.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+class TestWebHCatServer(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py",
+                       classname = "WebHCatServer",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py",
+                       classname = "WebHCatServer",
+                       command = "start",
+                       config_file="default.json"
+    )
+
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh start',
+                              not_if = 'ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1',
+                              user = 'hcat'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py",
+                       classname = "WebHCatServer",
+                       command = "stop",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh stop',
+                              user = 'hcat',
+                              )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/webhcat/webhcat.pid')
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py",
+                       classname = "WebHCatServer",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py",
+                       classname = "WebHCatServer",
+                       command = "start",
+                       config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh start',
+                              not_if = 'ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1',
+                              user = 'hcat'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py",
+                       classname = "WebHCatServer",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh stop',
+                              user = 'hcat',
+                              )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/webhcat/webhcat.pid')
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/run/webhcat',
+      owner = 'hcat',
+      group = 'hadoop',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/var/log/webhcat',
+      owner = 'hcat',
+      group = 'hadoop',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
+      owner = 'hcat',
+      group = 'hadoop',
+      conf_dir = '/etc/hcatalog/conf',
+      configurations = self.getConfig()['configurations']['webhcat-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('File', '/etc/hcatalog/conf/webhcat-env.sh',
+      content = Template('webhcat-env.sh.j2'),
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/lib/hadoop-mapreduce/hadoop-streaming*.jar /apps/webhcat/hadoop-streaming.jar',
+      not_if = ' hadoop fs -ls /apps/webhcat/hadoop-streaming.jar >/dev/null 2>&1',
+      user = 'hcat',
+      conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/pig.tar.gz /apps/webhcat/pig.tar.gz',
+      not_if = ' hadoop fs -ls /apps/webhcat/pig.tar.gz >/dev/null 2>&1',
+      user = 'hcat',
+      conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/hive.tar.gz /apps/webhcat/hive.tar.gz',
+      not_if = ' hadoop fs -ls /apps/webhcat/hive.tar.gz >/dev/null 2>&1',
+      user = 'hcat',
+      conf_dir = '/etc/hadoop/conf',
+    )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/run/webhcat',
+      owner = 'hcat',
+      group = 'hadoop',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/var/log/webhcat',
+      owner = 'hcat',
+      group = 'hadoop',
+      recursive = True,
+      mode = 493,
+    )
+    self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
+      owner = 'hcat',
+      group = 'hadoop',
+      conf_dir = '/etc/hcatalog/conf',
+      configurations = self.getConfig()['configurations']['webhcat-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('File', '/etc/hcatalog/conf/webhcat-env.sh',
+      content = Template('webhcat-env.sh.j2'),
+      owner = 'hcat',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;',
+      path = ['/bin'],
+      user = 'hcat',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/lib/hadoop-mapreduce/hadoop-streaming*.jar /apps/webhcat/hadoop-streaming.jar',
+      not_if = '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; hadoop fs -ls /apps/webhcat/hadoop-streaming.jar >/dev/null 2>&1',
+      user = 'hcat',
+      conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/pig.tar.gz /apps/webhcat/pig.tar.gz',
+      not_if = ' hadoop fs -ls /apps/webhcat/pig.tar.gz >/dev/null 2>&1',
+      user = 'hcat',
+      conf_dir = '/etc/hadoop/conf',
+    )
+    self.assertResourceCalled('ExecuteHadoop', 'fs -copyFromLocal /usr/share/HDP-webhcat/hive.tar.gz /apps/webhcat/hive.tar.gz',
+      not_if = ' hadoop fs -ls /apps/webhcat/hive.tar.gz >/dev/null 2>&1',
+      user = 'hcat',
+      conf_dir = '/etc/hadoop/conf',
+    )
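
Worth decoding in the secured variant above: the not_if of the hadoop-streaming.jar copy is the kinit command followed by the same "hadoop fs -ls" probe used in the default case, which is also why the default not_if strings begin with a stray leading space (the kinit prefix is simply empty there). A small sketch of the composition:

    kinit_cmd = ("/usr/bin/kinit -kt "
                 "/etc/security/keytabs/smokeuser.headless.keytab ambari-qa;")
    probe = "hadoop fs -ls /apps/webhcat/hadoop-streaming.jar >/dev/null 2>&1"

    secured_not_if = kinit_cmd + " " + probe  # authenticate, then probe HDFS
    default_not_if = "" + " " + probe         # empty prefix leaves the leading space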

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_service_check.py
new file mode 100644
index 0000000..bc63ad5
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/WEBHCAT/test_webhcat_service_check.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestServiceCheck(RMFTestCase):
+
+  def test_service_check_default(self):
+
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/service_check.py",
+                       classname="WebHCatServiceCheck",
+                       command="service_check",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('File', '/tmp/templetonSmoke.sh',
+                       content = StaticFile('templetonSmoke.sh'),
+                       mode = 493,
+    )
+    self.assertResourceCalled('Execute', 'sh /tmp/templetonSmoke.sh c6402.ambari.apache.org ambari-qa no_keytab False /usr/bin/kinit',
+                       logoutput = True,
+                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                       tries = 3,
+                       try_sleep = 5,
+    )
+    self.assertNoMoreResources()
+
+  def test_service_check_secured(self):
+
+    self.executeScript("1.3.2/services/WEBHCAT/package/scripts/service_check.py",
+                       classname="WebHCatServiceCheck",
+                       command="service_check",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('File', '/tmp/templetonSmoke.sh',
+                       content = StaticFile('templetonSmoke.sh'),
+                       mode = 493,
+    )
+    self.assertResourceCalled('Execute', 'sh /tmp/templetonSmoke.sh c6402.ambari.apache.org ambari-qa /etc/security/keytabs/smokeuser.headless.keytab True /usr/bin/kinit',
+                       logoutput = True,
+                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                       tries = 3,
+                       try_sleep = 5,
+    )
+    self.assertNoMoreResources()
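
Both invocations above pass the same five positional arguments to templetonSmoke.sh; comparing the default and secured runs, they line up as host, smoke user, keytab path (or the literal no_keytab), a security flag, and the kinit path. A sketch reconstructing the command string (the variable names are descriptive guesses, not taken from the script itself):

    host, smoke_user = "c6402.ambari.apache.org", "ambari-qa"
    keytab = "no_keytab"       # headless keytab path in the secured run
    security_enabled = False   # True in the secured run
    kinit_path = "/usr/bin/kinit"

    cmd = "sh /tmp/templetonSmoke.sh %s %s %s %s %s" % (
        host, smoke_user, keytab, security_enabled, kinit_path)
    # -> "sh /tmp/templetonSmoke.sh c6402.ambari.apache.org ambari-qa no_keytab False /usr/bin/kinit"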

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_client.py
new file mode 100644
index 0000000..f36b83a
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_client.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestZookeeperClient(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_client.py",
+                       classname = "ZookeeperClient",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
+      owner = 'zookeeper',
+      content = Template('zoo.cfg.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
+      owner = 'zookeeper',
+      content = Template('zookeeper-env.sh.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
+      owner = 'zookeeper',
+      content = Template('configuration.xsl.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Directory', '/var/run/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
+      owner = 'zookeeper',
+      group = 'hadoop',
+    )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_client.py",
+                       classname = "ZookeeperClient",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
+      owner = 'zookeeper',
+      content = Template('zoo.cfg.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
+      owner = 'zookeeper',
+      content = Template('zookeeper-env.sh.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
+      owner = 'zookeeper',
+      content = Template('configuration.xsl.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Directory', '/var/run/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper_client_jaas.conf',
+      owner = 'zookeeper',
+      content = Template('zookeeper_client_jaas.conf.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
+      owner = 'zookeeper',
+      group = 'hadoop',
+    )
+    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_server.py
new file mode 100644
index 0000000..876fd3e
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_server.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+class TestZookeeperServer(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
+                   classname = "ZookeeperServer",
+                   command = "configure",
+                   config_file="default.json"
+    )
+
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
+                   classname = "ZookeeperServer",
+                   command = "start",
+                   config_file="default.json"
+    )
+
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh start',
+                    not_if = 'ls /var/run/zookeeper/zookeeper_server.pid >/dev/null 2>&1 && ps `cat /var/run/zookeeper/zookeeper_server.pid` >/dev/null 2>&1',
+                    user = 'zookeeper'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
+                  classname = "ZookeeperServer",
+                  command = "stop",
+                  config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh stop',
+      user = 'zookeeper',
+    )
+    self.assertResourceCalled('Execute', 'rm -f /var/run/zookeeper/zookeeper_server.pid')
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
+                  classname = "ZookeeperServer",
+                  command = "configure",
+                  config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
+                  classname = "ZookeeperServer",
+                  command = "start",
+                  config_file="secured.json"
+    )
+
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh start',
+                  not_if = 'ls /var/run/zookeeper/zookeeper_server.pid >/dev/null 2>&1 && ps `cat /var/run/zookeeper/zookeeper_server.pid` >/dev/null 2>&1',
+                  user = 'zookeeper'
+    )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
+                  classname = "ZookeeperServer",
+                  command = "stop",
+                  config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh stop',
+                  user = 'zookeeper',
+    )
+
+    self.assertResourceCalled('Execute', 'rm -f /var/run/zookeeper/zookeeper_server.pid')
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+
+    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
+      owner = 'zookeeper',
+      content = Template('zoo.cfg.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
+      owner = 'zookeeper',
+      content = Template('zookeeper-env.sh.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
+      owner = 'zookeeper',
+      content = Template('configuration.xsl.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Directory', '/var/run/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/hadoop/zookeeper/myid',
+      content = '1',
+      mode = 420,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
+      owner = 'zookeeper',
+      group = 'hadoop',
+    )
+
+  def assert_configure_secured(self):
+
+    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo.cfg',
+      owner = 'zookeeper',
+      content = Template('zoo.cfg.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper-env.sh',
+      owner = 'zookeeper',
+      content = Template('zookeeper-env.sh.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/configuration.xsl',
+      owner = 'zookeeper',
+      content = Template('configuration.xsl.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('Directory', '/var/run/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/zookeeper',
+      owner = 'zookeeper',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('File', '/hadoop/zookeeper/myid',
+      content = '1',
+      mode = 420,
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper_jaas.conf',
+      owner = 'zookeeper',
+      content = Template('zookeeper_jaas.conf.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zookeeper_client_jaas.conf',
+      owner = 'zookeeper',
+      content = Template('zookeeper_client_jaas.conf.j2'),
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('File', '/etc/zookeeper/conf/zoo_sample.cfg',
+      owner = 'zookeeper',
+      group = 'hadoop',
+    )
\ No newline at end of file
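
A note on the mode assertions in this file: the expected modes are plain
integers, so mode = 420 is the decimal form of octal 0644 (and the
mode = 493 seen in other tests in this patch is 0755). A quick check in
Python:

    >>> oct(420), oct(493)   # Python 2 repr; Python 3 prints '0o644'/'0o755'
    ('0644', '0755')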

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_service_check.py
new file mode 100644
index 0000000..6cfd016
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/ZOOKEEPER/test_zookeeper_service_check.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestServiceCheck(RMFTestCase):
+
+  def test_service_check_default(self):
+
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/service_check.py",
+                       classname="ZookeeperServiceCheck",
+                       command="service_check",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('File', '/tmp/zkSmoke.sh',
+                       content = StaticFile('zkSmoke.sh'),
+                       mode = 493,
+    )
+    self.assertResourceCalled('Execute', 'sh /tmp/zkSmoke.sh /usr/lib/zookeeper/bin/zkCli.sh ambari-qa /etc/zookeeper/conf 2181 False /usr/bin/kinit no_keytab',
+                       logoutput = True,
+                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                       tries = 3,
+                       try_sleep = 5,
+    )
+    self.assertNoMoreResources()
+
+  def test_service_check_secured(self):
+
+    self.executeScript("1.3.2/services/ZOOKEEPER/package/scripts/service_check.py",
+                       classname="ZookeeperServiceCheck",
+                       command="service_check",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('File', '/tmp/zkSmoke.sh',
+                       content = StaticFile('zkSmoke.sh'),
+                       mode = 493,
+    )
+    self.assertResourceCalled('Execute', 'sh /tmp/zkSmoke.sh /usr/lib/zookeeper/bin/zkCli.sh ambari-qa /etc/zookeeper/conf 2181 True /usr/bin/kinit /etc/security/keytabs/smokeuser.headless.keytab',
+                       logoutput = True,
+                       path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                       tries = 3,
+                       try_sleep = 5,
+    )
+    self.assertNoMoreResources()


[11/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py
deleted file mode 100644
index 1d5db39..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=False)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py
deleted file mode 100644
index 6c00738..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=True)
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    #TODO remove this when the config command is implemented
-    self.configure(env)
-    oozie_service(action='start')
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    oozie_service(action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py
deleted file mode 100644
index e9edcc9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def oozie_service(action = 'start'): # 'start' or 'stop'
-  import params
-
-  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
-    
-    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
-    else:
-      db_connection_check_command = None
-      
-    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
-    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
-      
-    if db_connection_check_command:
-      Execute( db_connection_check_command)
-                  
-    Execute( cmd1,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-      ignore_failures = True
-    ) 
-    
-    Execute( cmd2,
-      user = params.oozie_user,       
-      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
-    )
-    
-    Execute( start_cmd,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-    )
-  elif action == 'stop':
-    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
-    Execute( stop_cmd,
-      only_if  = no_op_test
-    )
-
-  
-  
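
The no_op_test guard used throughout this patch treats a service as running
only if the pid file exists and ps still finds the recorded process. A rough
Python equivalent, for illustration (pid_file being whatever status_params
supplies):

    import os

    def pid_alive(pid_file):
        # Mirrors: ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes the process without affecting it
            return True
        except (IOError, ValueError, OSError):
            return False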

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py
deleted file mode 100644
index 9e45f9d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
-conf_dir = "/etc/oozie/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
-jdk_location = config['hostLevelParams']['jdk_location']
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hadoop_prefix = "/usr"
-oozie_tmp_dir = "/var/tmp/oozie"
-oozie_hdfs_user_dir = format("/user/{oozie_user}")
-oozie_pid_dir = status_params.oozie_pid_dir
-pid_file = status_params.pid_file
-hadoop_jar_location = "/usr/lib/hadoop/"
-# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
-ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
-oozie_libext_dir = "/usr/lib/oozie/libext"
-lzo_enabled = config['configurations']['global']['lzo_enabled']
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
-oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
-
-oracle_driver_jar_name = "ojdbc6.jar"
-java_share_dir = "/usr/share/java"
-
-java_home = config['hostLevelParams']['java_home']
-oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
-oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
-oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
-oozie_lib_dir = "/var/lib/oozie/"
-oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
-
-jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-if jdbc_driver_name == "com.mysql.jdbc.Driver":
-  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-else:
-  jdbc_driver_jar = ""
-
-hostname = config["hostname"]
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-has_falcon_host = not len(falcon_host)  == 0
-falcon_home = '/usr/lib/falcon'
-
-#oozie-log4j.properties
-if ('oozie-log4j' in config['configurations']):
-  log4j_props = config['configurations']['oozie-log4j']
-else:
-  log4j_props = None
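
Two lookup styles are mixed in this params file: plain dict access (as for
oozie_user) raises on a missing key, while default("/configurations/...",
fallback) degrades to the fallback, which is why the optional JDBC settings
use it. The distinction in plain Python, for illustration:

    conf = {'oozie-site': {}}
    conf['oozie-site'].get('oozie.service.JPAService.jdbc.url', '')  # -> ''
    conf['oozie-site']['oozie.service.JPAService.jdbc.url']          # raises KeyError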

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py
deleted file mode 100644
index 7c1c1f2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class OozieServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    # on HDP1 this file is different
-    smoke_test_file_name = 'oozieSmoke2.sh'
-
-    oozie_smoke_shell_file( smoke_test_file_name)
-  
-def oozie_smoke_shell_file(
-  file_name
-):
-  import params
-
-  File( format("/tmp/{file_name}"),
-    content = StaticFile(file_name),
-    mode = 0755
-  )
-  
-  if params.security_enabled:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
-  else:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
-
-  Execute( format("/tmp/{file_name}"),
-    command   = sh_cmd,
-    tries     = 3,
-    try_sleep = 5,
-    logoutput = True
-  )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServiceCheck().execute()
-  
-if __name__ == "__main__":
-  OozieServiceCheck().execute()
-  #main()
-  
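
In the Execute above, the first argument, /tmp/{file_name}, is only the
resource's name; what actually runs is the command= string, which invokes
the same script through sh with the conf dirs, the smoke user and, when
security is enabled, the keytab and kinit path appended. tries = 3 with
try_sleep = 5 retries the smoke test up to three times, five seconds apart.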

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py
deleted file mode 100644
index c44fcf4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
-pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2
deleted file mode 100644
index 270a1a8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-# export OOZIE_HTTP_PORT=11000
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2
deleted file mode 100644
index e4a2662..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig-log4j.xml
deleted file mode 100644
index a0db719..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig-log4j.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>log4j.logger.org.apache.pig</name>
-    <value>info, A</value>
-  </property>
-  <property>
-    <name>log4j.appender.A</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.A.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.A.layout.ConversionPattern</name>
-    <value>%-4r [%t] %-5p %c %x - %m%n</value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
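
The two reducer settings above drive Pig's default parallelism estimate:
roughly one reducer per bytes.per.reducer of input, capped at reducers.max.
A back-of-the-envelope check under these defaults (the input size is a
made-up example):

    import math
    bytes_per_reducer = 1000000000      # pig.exec.reducers.bytes.per.reducer
    reducers_max = 999                  # pig.exec.reducers.max
    input_bytes = 250 * 10**9           # hypothetical 250 GB of input
    reducers = min(reducers_max,
                   max(1, int(math.ceil(input_bytes / float(bytes_per_reducer)))))
    # -> 250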

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
deleted file mode 100644
index 4cb6a5e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.12.0.2.1.1</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>pig-log4j</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py
deleted file mode 100644
index 781e635..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-# server configurations
-config = Script.get_config()
-
-pig_conf_dir = "/etc/pig/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr"
-
-#log4j.properties
-if ('pig-log4j' in config['configurations']):
-  log4j_props = config['configurations']['pig-log4j']
-else:
-  log4j_props = None
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py
deleted file mode 100644
index d8aadf7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-
-def pig():
-  import params
-
-  Directory( params.pig_conf_dir,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  pig_TemplateConfig( ['pig-env.sh','pig.properties'])
-
-  if (params.log4j_props != None):
-    PropertiesFile('log4j.properties',
-      dir=params.pig_conf_dir,
-      properties=params.log4j_props,
-      mode=0664,
-      owner=params.hdfs_user,
-      group=params.user_group,
-    )
-  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user
-    )
-
-def pig_TemplateConfig(name):
-  import params
-  
-  if not isinstance(name, list):
-    name = [name]
-    
-  for x in name:
-    TemplateConfig( format("{pig_conf_dir}/{x}"),
-        owner = params.hdfs_user
-    )
-  
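
pig_TemplateConfig above uses the usual normalize-to-list idiom so callers
can pass either a single template name or a list. In isolation:

    def as_list(name):
        # Accept one name or a list of names, as pig_TemplateConfig does.
        return name if isinstance(name, list) else [name]

    as_list('pig-env.sh')                      # -> ['pig-env.sh']
    as_list(['pig-env.sh', 'pig.properties'])  # -> unchanged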

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py
deleted file mode 100644
index acd0cb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from pig import pig
-
-         
-class PigClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    pig()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py
deleted file mode 100644
index 3cca087..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class PigServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    input_file = 'passwd'
-    output_file = "pigsmoke.out"
-  
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    # cleanup is put below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is a function of the second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that only the second command needs the hadoop prefix
-    test_cmd = format("fs -test -e {output_file}")
-  
-    ExecuteHadoop( create_file_cmd,
-      tries     = 3,
-      try_sleep = 5,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-  
-    File( '/tmp/pigSmoke.sh',
-      content = StaticFile("pigSmoke.sh"),
-      mode = 0755
-    )
-  
-    Execute( "pig /tmp/pigSmoke.sh",
-      tries     = 3,
-      try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      user      = params.smokeuser,
-      logoutput = True
-    )
-  
-    ExecuteHadoop( test_cmd,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigServiceCheck().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigServiceCheck().execute()
-  
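
Two small points about the smoke flow above. The cleanup is chained with
';' rather than '&&' so the put still runs on a first attempt when there is
nothing to remove, and the compound command's exit status is that of the
final command, which is what the retry comment is getting at. The closing
check, fs -test -e pigsmoke.out, exits 0 only if the output exists, so
ExecuteHadoop fails the service check when the Pig job produced nothing.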

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2
deleted file mode 100644
index ad10c21..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-if [ -d "/usr/lib/tez" ]; then
-  PIG_OPTS="-Dmapreduce.framework.name=yarn"
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2
deleted file mode 100644
index 6fcb233..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Pig configuration file. All values can be overwritten by command line arguments.
-
-# log4jconf log4j configuration file
-# log4jconf=./conf/log4j.properties
-
-# a file that contains pig script
-#file=
-
-# load jarfile, colon separated
-#jar=
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-#verbose=true
-
-#exectype local|mapreduce, mapreduce is default
-#exectype=local
-
-#pig.logfile=
-
-#Do not spill temp files smaller than this size (bytes)
-#pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-#pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-#pig.exec.reducers.bytes.per.reducer=1000000000
-#pig.exec.reducers.max=999
-
-#Use this option only when your Pig job will otherwise die because of
-#using more counters than the hadoop-configured limit
-#pig.disable.counter=true
-hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml
deleted file mode 100644
index ea115c7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SQOOP</name>
-      <comment>Tool for transferring bulk data between Apache Hadoop and
-        structured data stores such as relational databases
-      </comment>
-      <version>1.4.4.2.1.1</version>
-
-      <components>
-        <component>
-          <name>SQOOP</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/sqoop_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>sqoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py
deleted file mode 100644
index 7de3367..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-security_enabled = config['configurations']['global']['security_enabled']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-sqoop_conf_dir = "/usr/lib/sqoop/conf"
-hbase_home = "/usr"
-hive_home = "/usr"
-zoo_conf_dir = "/etc/zookeeper"
-sqoop_lib = "/usr/lib/sqoop/lib"
-sqoop_user = "sqoop"
-
-keytab_path = config['configurations']['global']['keytab_path']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py
deleted file mode 100644
index c42501a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management import *
-
-
-class SqoopServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
-    Execute("sqoop version",
-            user = params.smokeuser,
-            logoutput = True
-    )
-
-if __name__ == "__main__":
-  SqoopServiceCheck().execute()
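
One detail of the check above: the kinit Execute is not given user=, so it
runs as the agent process's own user, while "sqoop version" runs as
params.smokeuser; kinit writes to the invoking user's default credential
cache, so the ticket only helps the smoke test when the agent runs as that
same user.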

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py
deleted file mode 100644
index 148a833..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-def sqoop(type=None):
-  import params
-  Link(params.sqoop_lib + "/mysql-connector-java.jar",
-       to = '/usr/share/java/mysql-connector-java.jar'
-  )
-  Directory(params.sqoop_conf_dir,
-            owner = params.sqoop_user,
-            group = params.user_group
-  )
-  sqoop_TemplateConfig("sqoop-env.sh")
-  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
-          owner = params.sqoop_user,
-          group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-
-def sqoop_TemplateConfig(name, tag=None):
-  import params
-  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
-                  owner = params.sqoop_user,
-                  template_tag = tag
-  )
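
sqoop_TemplateConfig is a thin wrapper over TemplateConfig: render the named .j2 template into the conf dir with the right ownership. A rough standard-library stand-in (the directory layout is an assumption, and Ambari actually renders Jinja2, not string.Template):

    import os, string

    def render_template(name, conf_dir="/usr/lib/sqoop/conf",
                        template_dir="templates", mapping=None):
        # Read the .j2 source and write the rendered file into conf_dir.
        with open(os.path.join(template_dir, name + ".j2")) as src:
            text = src.read()
        # Ambari renders Jinja2; string.Template here is only a stand-in.
        rendered = string.Template(text).safe_substitute(mapping or {})
        with open(os.path.join(conf_dir, name), "w") as dst:
            dst.write(rendered)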

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py
deleted file mode 100644
index 6829557..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from sqoop import sqoop
-
-
-class SqoopClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    sqoop(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2
deleted file mode 100644
index 90cbc75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
-
-#Set the path to where the zookeeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 39b901e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Time out for templeton api</description>
-  </property>
-
-</configuration>
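
webhcat.py (later in this commit) writes this file back out through XmlConfig. A minimal stand-in that serializes a property dict into Hadoop-style configuration XML looks roughly like this (a sketch, not the resource_management implementation):

    def to_hadoop_xml(props):
        # Serialize {name: value} into <configuration><property>... XML.
        # Values are not XML-escaped in this sketch.
        lines = ['<?xml version="1.0" encoding="UTF-8"?>', "<configuration>"]
        for name, value in sorted(props.items()):
            lines += ["  <property>",
                      "    <name>%s</name>" % name,
                      "    <value>%s</value>" % value,
                      "  </property>"]
        lines.append("</configuration>")
        return "\n".join(lines)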

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index bf08814..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <comment>This is a comment for the WEBHCAT service</comment>
-      <version>0.12.0.2.1.1</version>
-      <components>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh
deleted file mode 100644
index cefc4f0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# Pig smoke test is skipped in secure mode
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
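
The status probe at the top of the script has a compact Python analogue for the unsecured case; a sketch, with host and port as placeholders (Python 2, matching the stack's scripts; SPNEGO for secure clusters is out of scope here):

    import urllib2  # Python 2, matching the script shebangs in this stack

    def templeton_status_ok(host, port=50111):
        # GET /templeton/v1/status and require HTTP 200, like the curl probe.
        url = "http://%s:%d/templeton/v1/status" % (host, port)
        try:
            return urllib2.urlopen(url, timeout=10).getcode() == 200
        except urllib2.URLError:
            return False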

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py
deleted file mode 100644
index 83211e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-webhcat_user = config['configurations']['global']['webhcat_user']
-download_url = config['configurations']['global']['apache_artifacts_download_url']
-
-config_dir = '/etc/hcatalog/conf'
-
-templeton_log_dir = config['configurations']['global']['hcat_log_dir']
-templeton_pid_dir = status_params.templeton_pid_dir
-
-pid_file = status_params.pid_file
-
-hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-hadoop_home = '/usr'
-user_group = config['configurations']['global']['user_group']
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
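
get_kinit_path itself is not part of this diff; as used here, its contract is plausibly "return the first candidate location that actually holds a kinit binary". A sketch under that assumption (not the real resource_management code):

    import os

    def get_kinit_path(candidate_dirs):
        # Return the first candidate directory holding a kinit binary.
        for d in candidate_dirs:
            if d and os.path.isfile(os.path.join(d, "kinit")):
                return os.path.join(d, "kinit")
        return "kinit"  # fall back to a PATH lookup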

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py
deleted file mode 100644
index 58b4d25..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-class WebHCatServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File('/tmp/templetonSmoke.sh',
-         content= StaticFile('templetonSmoke.sh'),
-         mode=0755
-    )
-
-    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-                 " {security_enabled} {kinit_path_local}",
-                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True)
-
-if __name__ == "__main__":
-  WebHCatServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py
deleted file mode 100644
index 21dde6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
-pid_file = format('{templeton_pid_dir}/webhcat.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py
deleted file mode 100644
index c013624..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import sys
-
-
-def webhcat():
-  import params
-
-  Directory(params.templeton_pid_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.templeton_log_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.config_dir,
-            owner=params.webhcat_user,
-            group=params.user_group)
-
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['webhcat-site'],
-            owner=params.webhcat_user,
-            group=params.user_group,
-  )
-
-  File(format("{config_dir}/webhcat-env.sh"),
-       owner=params.webhcat_user,
-       group=params.user_group,
-       content=Template('webhcat-env.sh.j2')
-  )
-
-  if params.security_enabled:
-    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-  else:
-    kinit_if_needed = ""
-
-  if kinit_if_needed:
-    Execute(kinit_if_needed,
-            user=params.webhcat_user,
-            path='/bin'
-    )
-
-  copyFromLocal(path='/usr/lib/hadoop-mapreduce/hadoop-streaming*.jar',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
-                kinit_if_needed=kinit_if_needed
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
-  )
-
-
-def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinit_if_needed=""):
-  import params
-
-  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
-  unless_cmd = format("{kinit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
-
-  # Copy only when the destination does not exist yet (idempotent).
-  ExecuteHadoop(copy_cmd,
-                not_if=unless_cmd,
-                user=owner,
-                conf_dir=params.hadoop_conf_dir)
-
-  # Build the chown target: "owner:group", bare owner, or skip entirely.
-  if not owner:
-    chown = None
-  elif not group:
-    chown = owner
-  else:
-    chown = format('{owner}:{group}')
-
-  if chown:
-    chown_cmd = format("fs -chown {chown} {dest_dir}")
-
-    ExecuteHadoop(chown_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
-
-  if mode:
-    chmod_cmd = format('fs -chmod {mode} {dest_dir}')
-
-    ExecuteHadoop(chmod_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
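
The not_if guard in the copy above is what makes re-runs cheap: upload only when the destination is missing. The same idea with plain subprocess (the commands are the stock hadoop CLI; paths are placeholders):

    import subprocess

    def hdfs_put_if_absent(local_path, dest_dir):
        # Probe first; a non-zero exit from -ls means the target is absent.
        if subprocess.call(["hadoop", "fs", "-ls", dest_dir]) != 0:
            subprocess.check_call(["hadoop", "fs", "-copyFromLocal",
                                   local_path, dest_dir])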

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py
deleted file mode 100644
index 4365111..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-
-class WebHCatServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    webhcat()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    webhcat_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    webhcat_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-
-if __name__ == "__main__":
-  WebHCatServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py
deleted file mode 100644
index 12c3854..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-def webhcat_service(action='start'):
-  import params
-
-  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
-
-  if action == 'start':
-    daemon_cmd = format('{cmd} start')
-    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
-    Execute(daemon_cmd,
-            user=params.webhcat_user,
-            not_if=no_op_test
-    )
-  elif action == 'stop':
-    daemon_cmd = format('{cmd} stop')
-    Execute(daemon_cmd,
-            user=params.webhcat_user
-    )
-    Execute(format('rm -f {pid_file}'))
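
The no_op_test used for start is the usual pid-file liveness probe; an equivalent sketch in Python:

    import os

    def is_running(pid_file):
        # Alive only if the pid file exists and its pid answers signal 0.
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes existence without killing
            return True
        except (IOError, ValueError, OSError):
            return False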

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2
deleted file mode 100644
index 9ea4a79..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The file containing the running pid
-PID_FILE={{pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop


[51/51] [partial] git commit: AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
AMBARI-4491. Move all the supported versions in Baikal for stack to
python code (remove dependence on puppet). (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/43f14b34
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/43f14b34
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/43f14b34

Branch: refs/heads/trunk
Commit: 43f14b349cf529c7cda1a161f14bd1ea2ad54884
Parents: 37f11eb
Author: Andrew Onischuk <ao...@hortonworks.com>
Authored: Fri Jan 31 11:49:08 2014 -0800
Committer: Andrew Onischuk <ao...@hortonworks.com>
Committed: Fri Jan 31 11:49:08 2014 -0800

----------------------------------------------------------------------
 .../before-INSTALL/files/changeToSecureUid.sh   |    50 +
 .../1.3.2/hooks/before-INSTALL/scripts/hook.py  |    36 +
 .../hooks/before-INSTALL/scripts/params.py      |    81 +
 .../scripts/shared_initialization.py            |   107 +
 .../hooks/before-START/files/checkForFormat.sh  |    62 +
 .../before-START/files/task-log4j.properties    |   132 +
 .../1.3.2/hooks/before-START/scripts/hook.py    |    37 +
 .../1.3.2/hooks/before-START/scripts/params.py  |   197 +
 .../scripts/shared_initialization.py            |   317 +
 .../templates/commons-logging.properties.j2     |    25 +
 .../templates/exclude_hosts_list.j2             |     3 +
 .../before-START/templates/hadoop-env.sh.j2     |   121 +
 .../templates/hadoop-metrics2.properties.j2     |    45 +
 .../hooks/before-START/templates/hdfs.conf.j2   |    17 +
 .../before-START/templates/health_check-v2.j2   |    91 +
 .../before-START/templates/health_check.j2      |   118 +
 .../templates/include_hosts_list.j2             |     3 +
 .../hooks/before-START/templates/slaves.j2      |     3 +
 .../hooks/before-START/templates/snmpd.conf.j2  |    48 +
 .../templates/taskcontroller.cfg.j2             |    20 +
 .../HDP/1.3.2/services/FLUME/metainfo.xml       |     3 +-
 .../HDP/1.3.2/services/GANGLIA/metainfo.xml     |   109 +-
 .../GANGLIA/package/files/checkGmetad.sh        |    37 +
 .../GANGLIA/package/files/checkGmond.sh         |    62 +
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 +
 .../services/GANGLIA/package/files/gmetad.init  |    73 +
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 +
 .../services/GANGLIA/package/files/gmond.init   |    73 +
 .../services/GANGLIA/package/files/gmondLib.sh  |   545 +
 .../1.3.2/services/GANGLIA/package/files/rrd.py |   213 +
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 +
 .../GANGLIA/package/files/setupGanglia.sh       |   141 +
 .../GANGLIA/package/files/startGmetad.sh        |    64 +
 .../GANGLIA/package/files/startGmond.sh         |    80 +
 .../GANGLIA/package/files/startRrdcached.sh     |    69 +
 .../GANGLIA/package/files/stopGmetad.sh         |    43 +
 .../services/GANGLIA/package/files/stopGmond.sh |    54 +
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 +
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 +
 .../services/GANGLIA/package/scripts/ganglia.py |   106 +
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   140 +
 .../package/scripts/ganglia_monitor_service.py  |    31 +
 .../GANGLIA/package/scripts/ganglia_server.py   |   181 +
 .../package/scripts/ganglia_server_service.py   |    27 +
 .../services/GANGLIA/package/scripts/params.py  |    74 +
 .../GANGLIA/package/scripts/status_params.py    |    25 +
 .../package/templates/gangliaClusters.conf.j2   |    34 +
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    24 +
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    62 +
 .../HBASE/configuration/hbase-log4j.xml         |   183 +
 .../HDP/1.3.2/services/HBASE/metainfo.xml       |   114 +-
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    32 +
 .../services/HBASE/package/scripts/__init__.py  |    19 +
 .../services/HBASE/package/scripts/functions.py |    40 +
 .../services/HBASE/package/scripts/hbase.py     |   107 +
 .../HBASE/package/scripts/hbase_client.py       |    52 +
 .../HBASE/package/scripts/hbase_decommission.py |    53 +
 .../HBASE/package/scripts/hbase_master.py       |    81 +
 .../HBASE/package/scripts/hbase_regionserver.py |    75 +
 .../HBASE/package/scripts/hbase_service.py      |    46 +
 .../services/HBASE/package/scripts/params.py    |    95 +
 .../HBASE/package/scripts/service_check.py      |    89 +
 .../HBASE/package/scripts/status_params.py      |    25 +
 .../hadoop-metrics.properties-GANGLIA-MASTER.j2 |    50 +
 .../hadoop-metrics.properties-GANGLIA-RS.j2     |    50 +
 .../templates/hadoop-metrics.properties.j2      |    50 +
 .../HBASE/package/templates/hbase-env.sh.j2     |    82 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    26 +
 .../package/templates/hbase_client_jaas.conf.j2 |    23 +
 .../templates/hbase_grant_permissions.j2        |    21 +
 .../package/templates/hbase_master_jaas.conf.j2 |    25 +
 .../templates/hbase_regionserver_jaas.conf.j2   |    25 +
 .../HBASE/package/templates/regionservers.j2    |     2 +
 .../services/HCATALOG/configuration/global.xml  |    45 -
 .../HDP/1.3.2/services/HCATALOG/metainfo.xml    |    30 -
 .../services/HDFS/configuration/hdfs-log4j.xml  |   305 +
 .../stacks/HDP/1.3.2/services/HDFS/metainfo.xml |   135 +-
 .../HDFS/package/files/checkForFormat.sh        |    62 +
 .../services/HDFS/package/files/checkWebUI.py   |    53 +
 .../services/HDFS/package/scripts/datanode.py   |    57 +
 .../HDFS/package/scripts/hdfs_client.py         |    52 +
 .../HDFS/package/scripts/hdfs_datanode.py       |    60 +
 .../HDFS/package/scripts/hdfs_namenode.py       |   192 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |    53 +
 .../services/HDFS/package/scripts/namenode.py   |    66 +
 .../services/HDFS/package/scripts/params.py     |   170 +
 .../HDFS/package/scripts/service_check.py       |   106 +
 .../services/HDFS/package/scripts/snamenode.py  |    64 +
 .../HDFS/package/scripts/status_params.py       |    31 +
 .../services/HDFS/package/scripts/utils.py      |   133 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../services/HIVE/configuration/global.xml      |    23 +
 .../HIVE/configuration/hive-exec-log4j.xml      |   122 +
 .../services/HIVE/configuration/hive-log4j.xml  |   130 +
 .../stacks/HDP/1.3.2/services/HIVE/metainfo.xml |   177 +-
 .../services/HIVE/package/files/addMysqlUser.sh |    41 +
 .../services/HIVE/package/files/hcatSmoke.sh    |    35 +
 .../services/HIVE/package/files/hiveSmoke.sh    |    23 +
 .../services/HIVE/package/files/hiveserver2.sql |    23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |    31 +
 .../services/HIVE/package/files/pigSmoke.sh     |    18 +
 .../HIVE/package/files/startHiveserver2.sh      |    22 +
 .../HIVE/package/files/startMetastore.sh        |    22 +
 .../services/HIVE/package/scripts/__init__.py   |    19 +
 .../1.3.2/services/HIVE/package/scripts/hcat.py |    47 +
 .../HIVE/package/scripts/hcat_client.py         |    41 +
 .../HIVE/package/scripts/hcat_service_check.py  |    64 +
 .../1.3.2/services/HIVE/package/scripts/hive.py |   155 +
 .../HIVE/package/scripts/hive_client.py         |    41 +
 .../HIVE/package/scripts/hive_metastore.py      |    63 +
 .../HIVE/package/scripts/hive_server.py         |    63 +
 .../HIVE/package/scripts/hive_service.py        |    56 +
 .../HIVE/package/scripts/mysql_server.py        |    77 +
 .../HIVE/package/scripts/mysql_service.py       |    38 +
 .../services/HIVE/package/scripts/params.py     |   135 +
 .../HIVE/package/scripts/service_check.py       |    56 +
 .../HIVE/package/scripts/status_params.py       |    30 +
 .../HIVE/package/templates/hcat-env.sh.j2       |    25 +
 .../HIVE/package/templates/hive-env.sh.j2       |    55 +
 .../stacks/HDP/1.3.2/services/HUE/metainfo.xml  |     3 +-
 .../MAPREDUCE/configuration/mapreduce-log4j.xml |    73 +
 .../HDP/1.3.2/services/MAPREDUCE/metainfo.xml   |    93 +-
 .../MAPREDUCE/package/scripts/client.py         |    43 +
 .../MAPREDUCE/package/scripts/historyserver.py  |    60 +
 .../MAPREDUCE/package/scripts/jobtracker.py     |    83 +
 .../MAPREDUCE/package/scripts/mapreduce.py      |    50 +
 .../MAPREDUCE/package/scripts/params.py         |    54 +
 .../MAPREDUCE/package/scripts/service.py        |    56 +
 .../MAPREDUCE/package/scripts/service_check.py  |    89 +
 .../MAPREDUCE/package/scripts/status_params.py  |    33 +
 .../MAPREDUCE/package/scripts/tasktracker.py    |   104 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../HDP/1.3.2/services/NAGIOS/metainfo.xml      |    94 +-
 .../NAGIOS/package/files/check_aggregate.php    |   243 +
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 +
 .../package/files/check_datanode_storage.php    |   100 +
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   115 +
 .../package/files/check_hdfs_capacity.php       |   109 +
 .../files/check_hive_metastore_status.sh        |    45 +
 .../NAGIOS/package/files/check_hue_status.sh    |    31 +
 .../files/check_mapred_local_dir_used.sh        |    34 +
 .../package/files/check_name_dir_status.php     |    93 +
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    82 +
 .../package/files/check_nodemanager_health.sh   |    44 +
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 +
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 +
 .../package/files/check_templeton_status.sh     |    45 +
 .../NAGIOS/package/files/check_webui.sh         |    87 +
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 +
 .../NAGIOS/package/scripts/functions.py         |    31 +
 .../services/NAGIOS/package/scripts/nagios.py   |    97 +
 .../NAGIOS/package/scripts/nagios_server.py     |    87 +
 .../package/scripts/nagios_server_config.py     |    91 +
 .../NAGIOS/package/scripts/nagios_service.py    |    36 +
 .../services/NAGIOS/package/scripts/params.py   |   168 +
 .../NAGIOS/package/scripts/status_params.py     |    26 +
 .../NAGIOS/package/templates/contacts.cfg.j2    |    91 +
 .../package/templates/hadoop-commands.cfg.j2    |   114 +
 .../package/templates/hadoop-hostgroups.cfg.j2  |    33 +
 .../package/templates/hadoop-hosts.cfg.j2       |    34 +
 .../templates/hadoop-servicegroups.cfg.j2       |    98 +
 .../package/templates/hadoop-services.cfg.j2    |   714 +
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1349 ++
 .../NAGIOS/package/templates/nagios.conf.j2     |    62 +
 .../services/NAGIOS/package/templates/nagios.j2 |   146 +
 .../NAGIOS/package/templates/resource.cfg.j2    |    51 +
 .../OOZIE/configuration/oozie-log4j.xml         |   183 +
 .../HDP/1.3.2/services/OOZIE/metainfo.xml       |   104 +-
 .../services/OOZIE/package/files/oozieSmoke.sh  |    93 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 +
 .../services/OOZIE/package/scripts/oozie.py     |   111 +
 .../OOZIE/package/scripts/oozie_client.py       |    53 +
 .../OOZIE/package/scripts/oozie_server.py       |    65 +
 .../OOZIE/package/scripts/oozie_service.py      |    45 +
 .../services/OOZIE/package/scripts/params.py    |    70 +
 .../OOZIE/package/scripts/service_check.py      |    47 +
 .../OOZIE/package/scripts/status_params.py      |    26 +
 .../OOZIE/package/templates/oozie-env.sh.j2     |    64 +
 .../services/PIG/configuration/pig-log4j.xml    |    40 +
 .../stacks/HDP/1.3.2/services/PIG/metainfo.xml  |    48 +-
 .../services/PIG/package/files/pigSmoke.sh      |    18 +
 .../services/PIG/package/scripts/params.py      |    42 +
 .../1.3.2/services/PIG/package/scripts/pig.py   |    62 +
 .../services/PIG/package/scripts/pig_client.py  |    52 +
 .../PIG/package/scripts/service_check.py        |    75 +
 .../PIG/package/templates/pig-env.sh.j2         |    17 +
 .../PIG/package/templates/pig.properties.j2     |    55 +
 .../HDP/1.3.2/services/SQOOP/metainfo.xml       |    63 +-
 .../services/SQOOP/package/scripts/__init__.py  |    18 +
 .../services/SQOOP/package/scripts/params.py    |    36 +
 .../SQOOP/package/scripts/service_check.py      |    36 +
 .../services/SQOOP/package/scripts/sqoop.py     |    51 +
 .../SQOOP/package/scripts/sqoop_client.py       |    40 +
 .../SQOOP/package/templates/sqoop-env.sh.j2     |    36 +
 .../HDP/1.3.2/services/WEBHCAT/metainfo.xml     |    90 +-
 .../WEBHCAT/package/files/templetonSmoke.sh     |    95 +
 .../WEBHCAT/package/scripts/__init__.py         |    21 +
 .../services/WEBHCAT/package/scripts/params.py  |    51 +
 .../WEBHCAT/package/scripts/service_check.py    |    45 +
 .../WEBHCAT/package/scripts/status_params.py    |    26 +
 .../services/WEBHCAT/package/scripts/webhcat.py |   120 +
 .../WEBHCAT/package/scripts/webhcat_server.py   |    54 +
 .../WEBHCAT/package/scripts/webhcat_service.py  |    41 +
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |    44 +
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |    84 +
 .../HDP/1.3.2/services/ZOOKEEPER/metainfo.xml   |    58 +-
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 +
 .../ZOOKEEPER/package/files/zkService.sh        |    26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 +
 .../ZOOKEEPER/package/scripts/__init__.py       |    21 +
 .../ZOOKEEPER/package/scripts/params.py         |    77 +
 .../ZOOKEEPER/package/scripts/service_check.py  |    47 +
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   106 +
 .../package/scripts/zookeeper_client.py         |    43 +
 .../package/scripts/zookeeper_server.py         |    55 +
 .../package/scripts/zookeeper_service.py        |    43 +
 .../package/templates/configuration.xsl.j2      |    37 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    51 +
 .../package/templates/zookeeper-env.sh.j2       |    25 +
 .../templates/zookeeper_client_jaas.conf.j2     |    22 +
 .../package/templates/zookeeper_jaas.conf.j2    |    25 +
 .../before-INSTALL/files/changeToSecureUid.sh   |    50 -
 .../1.3.3/hooks/before-INSTALL/scripts/hook.py  |    36 -
 .../hooks/before-INSTALL/scripts/params.py      |    81 -
 .../scripts/shared_initialization.py            |   107 -
 .../hooks/before-START/files/checkForFormat.sh  |    62 -
 .../before-START/files/task-log4j.properties    |   132 -
 .../1.3.3/hooks/before-START/scripts/hook.py    |    37 -
 .../1.3.3/hooks/before-START/scripts/params.py  |   197 -
 .../scripts/shared_initialization.py            |   317 -
 .../templates/commons-logging.properties.j2     |    25 -
 .../templates/exclude_hosts_list.j2             |     3 -
 .../before-START/templates/hadoop-env.sh.j2     |   121 -
 .../templates/hadoop-metrics2.properties.j2     |    45 -
 .../hooks/before-START/templates/hdfs.conf.j2   |    17 -
 .../before-START/templates/health_check-v2.j2   |    91 -
 .../before-START/templates/health_check.j2      |   118 -
 .../templates/include_hosts_list.j2             |     3 -
 .../hooks/before-START/templates/slaves.j2      |     3 -
 .../hooks/before-START/templates/snmpd.conf.j2  |    48 -
 .../templates/taskcontroller.cfg.j2             |    20 -
 .../resources/stacks/HDP/1.3.3/metainfo.xml     |     1 +
 .../services/FLUME/configuration/global.xml     |    24 -
 .../HDP/1.3.3/services/FLUME/metainfo.xml       |    31 -
 .../services/GANGLIA/configuration/global.xml   |    55 -
 .../HDP/1.3.3/services/GANGLIA/metainfo.xml     |   113 -
 .../GANGLIA/package/files/checkGmetad.sh        |    37 -
 .../GANGLIA/package/files/checkGmond.sh         |    62 -
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 -
 .../services/GANGLIA/package/files/gmetad.init  |    73 -
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 -
 .../services/GANGLIA/package/files/gmond.init   |    73 -
 .../services/GANGLIA/package/files/gmondLib.sh  |   545 -
 .../1.3.3/services/GANGLIA/package/files/rrd.py |   213 -
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 -
 .../GANGLIA/package/files/setupGanglia.sh       |   141 -
 .../GANGLIA/package/files/startGmetad.sh        |    64 -
 .../GANGLIA/package/files/startGmond.sh         |    80 -
 .../GANGLIA/package/files/startRrdcached.sh     |    69 -
 .../GANGLIA/package/files/stopGmetad.sh         |    43 -
 .../services/GANGLIA/package/files/stopGmond.sh |    54 -
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 -
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 -
 .../services/GANGLIA/package/scripts/ganglia.py |   106 -
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   140 -
 .../package/scripts/ganglia_monitor_service.py  |    31 -
 .../GANGLIA/package/scripts/ganglia_server.py   |   181 -
 .../package/scripts/ganglia_server_service.py   |    27 -
 .../services/GANGLIA/package/scripts/params.py  |    74 -
 .../GANGLIA/package/scripts/status_params.py    |    25 -
 .../package/templates/gangliaClusters.conf.j2   |    34 -
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    24 -
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    62 -
 .../services/HBASE/configuration/global.xml     |   160 -
 .../HBASE/configuration/hbase-log4j.xml         |   183 -
 .../HBASE/configuration/hbase-policy.xml        |    53 -
 .../services/HBASE/configuration/hbase-site.xml |   370 -
 .../HDP/1.3.3/services/HBASE/metainfo.xml       |   124 -
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    32 -
 .../services/HBASE/package/scripts/__init__.py  |    19 -
 .../services/HBASE/package/scripts/functions.py |    40 -
 .../services/HBASE/package/scripts/hbase.py     |   107 -
 .../HBASE/package/scripts/hbase_client.py       |    52 -
 .../HBASE/package/scripts/hbase_decommission.py |    53 -
 .../HBASE/package/scripts/hbase_master.py       |    81 -
 .../HBASE/package/scripts/hbase_regionserver.py |    75 -
 .../HBASE/package/scripts/hbase_service.py      |    46 -
 .../services/HBASE/package/scripts/params.py    |    95 -
 .../HBASE/package/scripts/service_check.py      |    89 -
 .../HBASE/package/scripts/status_params.py      |    25 -
 .../hadoop-metrics.properties-GANGLIA-MASTER.j2 |    50 -
 .../hadoop-metrics.properties-GANGLIA-RS.j2     |    50 -
 .../templates/hadoop-metrics.properties.j2      |    50 -
 .../HBASE/package/templates/hbase-env.sh.j2     |    82 -
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    26 -
 .../package/templates/hbase_client_jaas.conf.j2 |    23 -
 .../templates/hbase_grant_permissions.j2        |    21 -
 .../package/templates/hbase_master_jaas.conf.j2 |    25 -
 .../templates/hbase_regionserver_jaas.conf.j2   |    25 -
 .../HBASE/package/templates/regionservers.j2    |     2 -
 .../services/HDFS/configuration/core-site.xml   |   255 -
 .../services/HDFS/configuration/global.xml      |   187 -
 .../HDFS/configuration/hadoop-policy.xml        |   134 -
 .../services/HDFS/configuration/hdfs-log4j.xml  |   305 -
 .../services/HDFS/configuration/hdfs-site.xml   |   476 -
 .../stacks/HDP/1.3.3/services/HDFS/metainfo.xml |   147 -
 .../HDFS/package/files/checkForFormat.sh        |    62 -
 .../services/HDFS/package/files/checkWebUI.py   |    53 -
 .../services/HDFS/package/scripts/datanode.py   |    57 -
 .../HDFS/package/scripts/hdfs_client.py         |    52 -
 .../HDFS/package/scripts/hdfs_datanode.py       |    60 -
 .../HDFS/package/scripts/hdfs_namenode.py       |   192 -
 .../HDFS/package/scripts/hdfs_snamenode.py      |    53 -
 .../services/HDFS/package/scripts/namenode.py   |    66 -
 .../services/HDFS/package/scripts/params.py     |   170 -
 .../HDFS/package/scripts/service_check.py       |   106 -
 .../services/HDFS/package/scripts/snamenode.py  |    64 -
 .../HDFS/package/scripts/status_params.py       |    31 -
 .../services/HDFS/package/scripts/utils.py      |   133 -
 .../package/templates/exclude_hosts_list.j2     |     3 -
 .../services/HIVE/configuration/global.xml      |   148 -
 .../HIVE/configuration/hive-exec-log4j.xml      |   122 -
 .../services/HIVE/configuration/hive-log4j.xml  |   130 -
 .../services/HIVE/configuration/hive-site.xml   |   250 -
 .../stacks/HDP/1.3.3/services/HIVE/metainfo.xml |   188 -
 .../services/HIVE/package/files/addMysqlUser.sh |    41 -
 .../services/HIVE/package/files/hcatSmoke.sh    |    35 -
 .../services/HIVE/package/files/hiveSmoke.sh    |    23 -
 .../services/HIVE/package/files/hiveserver2.sql |    23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |    31 -
 .../services/HIVE/package/files/pigSmoke.sh     |    18 -
 .../HIVE/package/files/startHiveserver2.sh      |    22 -
 .../HIVE/package/files/startMetastore.sh        |    22 -
 .../services/HIVE/package/scripts/__init__.py   |    19 -
 .../1.3.3/services/HIVE/package/scripts/hcat.py |    47 -
 .../HIVE/package/scripts/hcat_client.py         |    41 -
 .../HIVE/package/scripts/hcat_service_check.py  |    64 -
 .../1.3.3/services/HIVE/package/scripts/hive.py |   155 -
 .../HIVE/package/scripts/hive_client.py         |    41 -
 .../HIVE/package/scripts/hive_metastore.py      |    63 -
 .../HIVE/package/scripts/hive_server.py         |    63 -
 .../HIVE/package/scripts/hive_service.py        |    56 -
 .../HIVE/package/scripts/mysql_server.py        |    77 -
 .../HIVE/package/scripts/mysql_service.py       |    38 -
 .../services/HIVE/package/scripts/params.py     |   135 -
 .../HIVE/package/scripts/service_check.py       |    56 -
 .../HIVE/package/scripts/status_params.py       |    30 -
 .../HIVE/package/templates/hcat-env.sh.j2       |    25 -
 .../HIVE/package/templates/hive-env.sh.j2       |    55 -
 .../1.3.3/services/HUE/configuration/global.xml |    35 -
 .../services/HUE/configuration/hue-site.xml     |   290 -
 .../stacks/HDP/1.3.3/services/HUE/metainfo.xml  |    32 -
 .../configuration/capacity-scheduler.xml        |   195 -
 .../MAPREDUCE/configuration/core-site.xml       |    20 -
 .../services/MAPREDUCE/configuration/global.xml |   160 -
 .../configuration/mapred-queue-acls.xml         |    52 -
 .../MAPREDUCE/configuration/mapred-site.xml     |   623 -
 .../MAPREDUCE/configuration/mapreduce-log4j.xml |    73 -
 .../HDP/1.3.3/services/MAPREDUCE/metainfo.xml   |   103 -
 .../MAPREDUCE/package/scripts/client.py         |    43 -
 .../MAPREDUCE/package/scripts/historyserver.py  |    60 -
 .../MAPREDUCE/package/scripts/jobtracker.py     |    83 -
 .../MAPREDUCE/package/scripts/mapreduce.py      |    50 -
 .../MAPREDUCE/package/scripts/params.py         |    54 -
 .../MAPREDUCE/package/scripts/service.py        |    56 -
 .../MAPREDUCE/package/scripts/service_check.py  |    89 -
 .../MAPREDUCE/package/scripts/status_params.py  |    33 -
 .../MAPREDUCE/package/scripts/tasktracker.py    |   104 -
 .../package/templates/exclude_hosts_list.j2     |     3 -
 .../services/NAGIOS/configuration/global.xml    |    50 -
 .../HDP/1.3.3/services/NAGIOS/metainfo.xml      |   106 -
 .../NAGIOS/package/files/check_aggregate.php    |   243 -
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 -
 .../package/files/check_datanode_storage.php    |   100 -
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   115 -
 .../package/files/check_hdfs_capacity.php       |   109 -
 .../files/check_hive_metastore_status.sh        |    45 -
 .../NAGIOS/package/files/check_hue_status.sh    |    31 -
 .../files/check_mapred_local_dir_used.sh        |    34 -
 .../package/files/check_name_dir_status.php     |    93 -
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    82 -
 .../package/files/check_nodemanager_health.sh   |    44 -
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 -
 .../package/files/check_templeton_status.sh     |    45 -
 .../NAGIOS/package/files/check_webui.sh         |    87 -
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 -
 .../NAGIOS/package/scripts/functions.py         |    31 -
 .../services/NAGIOS/package/scripts/nagios.py   |    97 -
 .../NAGIOS/package/scripts/nagios_server.py     |    87 -
 .../package/scripts/nagios_server_config.py     |    91 -
 .../NAGIOS/package/scripts/nagios_service.py    |    36 -
 .../services/NAGIOS/package/scripts/params.py   |   168 -
 .../NAGIOS/package/scripts/status_params.py     |    26 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |    91 -
 .../package/templates/hadoop-commands.cfg.j2    |   114 -
 .../package/templates/hadoop-hostgroups.cfg.j2  |    33 -
 .../package/templates/hadoop-hosts.cfg.j2       |    34 -
 .../templates/hadoop-servicegroups.cfg.j2       |    98 -
 .../package/templates/hadoop-services.cfg.j2    |   714 -
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1349 --
 .../NAGIOS/package/templates/nagios.conf.j2     |    62 -
 .../services/NAGIOS/package/templates/nagios.j2 |   146 -
 .../NAGIOS/package/templates/resource.cfg.j2    |    51 -
 .../services/OOZIE/configuration/global.xml     |   105 -
 .../OOZIE/configuration/oozie-log4j.xml         |   183 -
 .../services/OOZIE/configuration/oozie-site.xml |   253 -
 .../HDP/1.3.3/services/OOZIE/metainfo.xml       |   114 -
 .../services/OOZIE/package/files/oozieSmoke.sh  |    93 -
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 -
 .../services/OOZIE/package/scripts/oozie.py     |   111 -
 .../OOZIE/package/scripts/oozie_client.py       |    53 -
 .../OOZIE/package/scripts/oozie_server.py       |    65 -
 .../OOZIE/package/scripts/oozie_service.py      |    45 -
 .../services/OOZIE/package/scripts/params.py    |    70 -
 .../OOZIE/package/scripts/service_check.py      |    47 -
 .../OOZIE/package/scripts/status_params.py      |    26 -
 .../OOZIE/package/templates/oozie-env.sh.j2     |    64 -
 .../services/PIG/configuration/pig-log4j.xml    |    40 -
 .../services/PIG/configuration/pig.properties   |    52 -
 .../stacks/HDP/1.3.3/services/PIG/metainfo.xml  |    62 -
 .../services/PIG/package/files/pigSmoke.sh      |    18 -
 .../services/PIG/package/scripts/params.py      |    42 -
 .../1.3.3/services/PIG/package/scripts/pig.py   |    62 -
 .../services/PIG/package/scripts/pig_client.py  |    52 -
 .../PIG/package/scripts/service_check.py        |    75 -
 .../PIG/package/templates/pig-env.sh.j2         |    17 -
 .../PIG/package/templates/pig.properties.j2     |    55 -
 .../HDP/1.3.3/services/SQOOP/metainfo.xml       |    77 -
 .../services/SQOOP/package/scripts/__init__.py  |    18 -
 .../services/SQOOP/package/scripts/params.py    |    36 -
 .../SQOOP/package/scripts/service_check.py      |    36 -
 .../services/SQOOP/package/scripts/sqoop.py     |    51 -
 .../SQOOP/package/scripts/sqoop_client.py       |    40 -
 .../SQOOP/package/templates/sqoop-env.sh.j2     |    36 -
 .../WEBHCAT/configuration/webhcat-site.xml      |   126 -
 .../HDP/1.3.3/services/WEBHCAT/metainfo.xml     |    97 -
 .../WEBHCAT/package/files/templetonSmoke.sh     |    95 -
 .../WEBHCAT/package/scripts/__init__.py         |    21 -
 .../services/WEBHCAT/package/scripts/params.py  |    51 -
 .../WEBHCAT/package/scripts/service_check.py    |    45 -
 .../WEBHCAT/package/scripts/status_params.py    |    26 -
 .../services/WEBHCAT/package/scripts/webhcat.py |   120 -
 .../WEBHCAT/package/scripts/webhcat_server.py   |    54 -
 .../WEBHCAT/package/scripts/webhcat_service.py  |    41 -
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |    44 -
 .../services/ZOOKEEPER/configuration/global.xml |    75 -
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |    84 -
 .../HDP/1.3.3/services/ZOOKEEPER/metainfo.xml   |    73 -
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 -
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 -
 .../ZOOKEEPER/package/files/zkService.sh        |    26 -
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 -
 .../ZOOKEEPER/package/scripts/__init__.py       |    21 -
 .../ZOOKEEPER/package/scripts/params.py         |    77 -
 .../ZOOKEEPER/package/scripts/service_check.py  |    47 -
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 -
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   106 -
 .../package/scripts/zookeeper_client.py         |    43 -
 .../package/scripts/zookeeper_server.py         |    55 -
 .../package/scripts/zookeeper_service.py        |    43 -
 .../package/templates/configuration.xsl.j2      |    37 -
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    51 -
 .../package/templates/zookeeper-env.sh.j2       |    25 -
 .../templates/zookeeper_client_jaas.conf.j2     |    22 -
 .../package/templates/zookeeper_jaas.conf.j2    |    25 -
 .../before-INSTALL/files/changeToSecureUid.sh   |    50 +
 .../2.0.6/hooks/before-INSTALL/scripts/hook.py  |    36 +
 .../hooks/before-INSTALL/scripts/params.py      |    84 +
 .../scripts/shared_initialization.py            |   113 +
 .../hooks/before-START/files/checkForFormat.sh  |    62 +
 .../before-START/files/task-log4j.properties    |   132 +
 .../2.0.6/hooks/before-START/scripts/hook.py    |    37 +
 .../2.0.6/hooks/before-START/scripts/params.py  |   197 +
 .../scripts/shared_initialization.py            |   322 +
 .../templates/commons-logging.properties.j2     |    25 +
 .../templates/exclude_hosts_list.j2             |     3 +
 .../before-START/templates/hadoop-env.sh.j2     |   125 +
 .../templates/hadoop-metrics2.properties.j2     |    45 +
 .../hooks/before-START/templates/hdfs.conf.j2   |    17 +
 .../before-START/templates/health_check-v2.j2   |    91 +
 .../before-START/templates/health_check.j2      |   118 +
 .../templates/include_hosts_list.j2             |     3 +
 .../hooks/before-START/templates/slaves.j2      |     3 +
 .../hooks/before-START/templates/snmpd.conf.j2  |    48 +
 .../templates/taskcontroller.cfg.j2             |    20 +
 .../services/GANGLIA/configuration/global.xml   |    51 -
 .../HDP/2.0.6/services/GANGLIA/metainfo.xml     |   100 +-
 .../GANGLIA/package/files/checkGmetad.sh        |    37 +
 .../GANGLIA/package/files/checkGmond.sh         |    62 +
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 +
 .../services/GANGLIA/package/files/gmetad.init  |    73 +
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 +
 .../services/GANGLIA/package/files/gmond.init   |    73 +
 .../services/GANGLIA/package/files/gmondLib.sh  |   545 +
 .../2.0.6/services/GANGLIA/package/files/rrd.py |   213 +
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 +
 .../GANGLIA/package/files/setupGanglia.sh       |   141 +
 .../GANGLIA/package/files/startGmetad.sh        |    64 +
 .../GANGLIA/package/files/startGmond.sh         |    80 +
 .../GANGLIA/package/files/startRrdcached.sh     |    69 +
 .../GANGLIA/package/files/stopGmetad.sh         |    43 +
 .../services/GANGLIA/package/files/stopGmond.sh |    54 +
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 +
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 +
 .../services/GANGLIA/package/scripts/ganglia.py |    97 +
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   139 +
 .../package/scripts/ganglia_monitor_service.py  |    31 +
 .../GANGLIA/package/scripts/ganglia_server.py   |   197 +
 .../package/scripts/ganglia_server_service.py   |    27 +
 .../services/GANGLIA/package/scripts/params.py  |    80 +
 .../GANGLIA/package/scripts/status_params.py    |    25 +
 .../package/templates/gangliaClusters.conf.j2   |    35 +
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    24 +
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    62 +
 .../HBASE/configuration/hbase-log4j.xml         |   183 +
 .../HDP/2.0.6/services/HBASE/metainfo.xml       |    84 +-
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    32 +
 .../services/HBASE/package/scripts/__init__.py  |    19 +
 .../services/HBASE/package/scripts/functions.py |    40 +
 .../services/HBASE/package/scripts/hbase.py     |   107 +
 .../HBASE/package/scripts/hbase_client.py       |    52 +
 .../HBASE/package/scripts/hbase_decommission.py |    53 +
 .../HBASE/package/scripts/hbase_master.py       |    81 +
 .../HBASE/package/scripts/hbase_regionserver.py |    75 +
 .../HBASE/package/scripts/hbase_service.py      |    46 +
 .../services/HBASE/package/scripts/params.py    |    95 +
 .../HBASE/package/scripts/service_check.py      |    89 +
 .../HBASE/package/scripts/status_params.py      |    25 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    62 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    62 +
 .../HBASE/package/templates/hbase-env.sh.j2     |    82 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    26 +
 .../package/templates/hbase_client_jaas.conf.j2 |     5 +
 .../templates/hbase_grant_permissions.j2        |    21 +
 .../package/templates/hbase_master_jaas.conf.j2 |     8 +
 .../templates/hbase_regionserver_jaas.conf.j2   |     8 +
 .../HBASE/package/templates/regionservers.j2    |     2 +
 .../HDP/2.0.6/services/HCATALOG/metainfo.xml    |    30 -
 .../services/HDFS/configuration/hdfs-log4j.xml  |   305 +
 .../services/HDFS/configuration/hdfs-site.xml   |   300 +-
 .../stacks/HDP/2.0.6/services/HDFS/metainfo.xml |   137 +-
 .../HDFS/package/files/checkForFormat.sh        |    62 +
 .../services/HDFS/package/files/checkWebUI.py   |    53 +
 .../services/HDFS/package/scripts/datanode.py   |    57 +
 .../HDFS/package/scripts/hdfs_client.py         |    52 +
 .../HDFS/package/scripts/hdfs_datanode.py       |    57 +
 .../HDFS/package/scripts/hdfs_namenode.py       |   212 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |    53 +
 .../HDFS/package/scripts/journalnode.py         |    74 +
 .../services/HDFS/package/scripts/namenode.py   |    68 +
 .../services/HDFS/package/scripts/params.py     |   193 +
 .../HDFS/package/scripts/service_check.py       |   107 +
 .../services/HDFS/package/scripts/snamenode.py  |    64 +
 .../HDFS/package/scripts/status_params.py       |    31 +
 .../services/HDFS/package/scripts/utils.py      |   138 +
 .../services/HDFS/package/scripts/zkfc_slave.py |    62 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../HIVE/configuration/hive-exec-log4j.xml      |   122 +
 .../services/HIVE/configuration/hive-log4j.xml  |   130 +
 .../services/HIVE/configuration/hive-site.xml   |     2 +-
 .../stacks/HDP/2.0.6/services/HIVE/metainfo.xml |   147 +-
 .../services/HIVE/package/files/addMysqlUser.sh |    41 +
 .../services/HIVE/package/files/hcatSmoke.sh    |    35 +
 .../services/HIVE/package/files/hiveSmoke.sh    |    23 +
 .../services/HIVE/package/files/hiveserver2.sql |    23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |    31 +
 .../services/HIVE/package/files/pigSmoke.sh     |    18 +
 .../HIVE/package/files/startHiveserver2.sh      |    22 +
 .../HIVE/package/files/startMetastore.sh        |    22 +
 .../services/HIVE/package/scripts/__init__.py   |    19 +
 .../2.0.6/services/HIVE/package/scripts/hcat.py |    47 +
 .../HIVE/package/scripts/hcat_client.py         |    43 +
 .../HIVE/package/scripts/hcat_service_check.py  |    63 +
 .../2.0.6/services/HIVE/package/scripts/hive.py |   155 +
 .../HIVE/package/scripts/hive_client.py         |    41 +
 .../HIVE/package/scripts/hive_metastore.py      |    63 +
 .../HIVE/package/scripts/hive_server.py         |    63 +
 .../HIVE/package/scripts/hive_service.py        |    56 +
 .../HIVE/package/scripts/mysql_server.py        |    77 +
 .../HIVE/package/scripts/mysql_service.py       |    38 +
 .../services/HIVE/package/scripts/params.py     |   135 +
 .../HIVE/package/scripts/service_check.py       |    56 +
 .../HIVE/package/scripts/status_params.py       |    30 +
 .../HIVE/package/templates/hcat-env.sh.j2       |    25 +
 .../HIVE/package/templates/hive-env.sh.j2       |    55 +
 .../configuration/container-executor.cfg        |    20 -
 .../MAPREDUCE2/configuration/core-site.xml      |    20 -
 .../MAPREDUCE2/configuration/global.xml         |    44 -
 .../configuration/mapred-queue-acls.xml         |    52 -
 .../MAPREDUCE2/configuration/mapred-site.xml    |   386 -
 .../HDP/2.0.6/services/MAPREDUCE2/metainfo.xml  |    37 -
 .../HDP/2.0.6/services/MAPREDUCE2/metrics.json  |   383 -
 .../services/NAGIOS/configuration/global.xml    |    51 -
 .../HDP/2.0.6/services/NAGIOS/metainfo.xml      |   101 +-
 .../NAGIOS/package/files/check_aggregate.php    |   243 +
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 +
 .../package/files/check_datanode_storage.php    |   100 +
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   115 +
 .../package/files/check_hdfs_capacity.php       |   109 +
 .../files/check_hive_metastore_status.sh        |    45 +
 .../NAGIOS/package/files/check_hue_status.sh    |    31 +
 .../files/check_mapred_local_dir_used.sh        |    34 +
 .../package/files/check_name_dir_status.php     |    93 +
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    82 +
 .../package/files/check_nodemanager_health.sh   |    44 +
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 +
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 +
 .../package/files/check_templeton_status.sh     |    45 +
 .../NAGIOS/package/files/check_webui.sh         |    87 +
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 +
 .../NAGIOS/package/scripts/functions.py         |    31 +
 .../services/NAGIOS/package/scripts/nagios.py   |    97 +
 .../NAGIOS/package/scripts/nagios_server.py     |    87 +
 .../package/scripts/nagios_server_config.py     |    91 +
 .../NAGIOS/package/scripts/nagios_service.py    |    36 +
 .../services/NAGIOS/package/scripts/params.py   |   162 +
 .../NAGIOS/package/scripts/status_params.py     |    26 +
 .../NAGIOS/package/templates/contacts.cfg.j2    |    91 +
 .../package/templates/hadoop-commands.cfg.j2    |   114 +
 .../package/templates/hadoop-hostgroups.cfg.j2  |    15 +
 .../package/templates/hadoop-hosts.cfg.j2       |    16 +
 .../templates/hadoop-servicegroups.cfg.j2       |    80 +
 .../package/templates/hadoop-services.cfg.j2    |   643 +
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1349 ++
 .../NAGIOS/package/templates/nagios.conf.j2     |    62 +
 .../services/NAGIOS/package/templates/nagios.j2 |   146 +
 .../NAGIOS/package/templates/resource.cfg.j2    |    33 +
 .../OOZIE/configuration/oozie-log4j.xml         |   183 +
 .../HDP/2.0.6/services/OOZIE/metainfo.xml       |    75 +-
 .../services/OOZIE/package/files/oozieSmoke2.sh |    95 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 +
 .../services/OOZIE/package/scripts/oozie.py     |   134 +
 .../OOZIE/package/scripts/oozie_client.py       |    33 +
 .../OOZIE/package/scripts/oozie_server.py       |    47 +
 .../OOZIE/package/scripts/oozie_service.py      |    65 +
 .../services/OOZIE/package/scripts/params.py    |    83 +
 .../OOZIE/package/scripts/service_check.py      |    67 +
 .../OOZIE/package/scripts/status_params.py      |    26 +
 .../OOZIE/package/templates/oozie-env.sh.j2     |    64 +
 .../package/templates/oozie-log4j.properties.j2 |    74 +
 .../services/PIG/configuration/pig-log4j.xml    |    40 +
 .../stacks/HDP/2.0.6/services/PIG/metainfo.xml  |    47 +-
 .../services/PIG/package/files/pigSmoke.sh      |    18 +
 .../services/PIG/package/scripts/params.py      |    42 +
 .../2.0.6/services/PIG/package/scripts/pig.py   |    61 +
 .../services/PIG/package/scripts/pig_client.py  |    52 +
 .../PIG/package/scripts/service_check.py        |    75 +
 .../PIG/package/templates/pig-env.sh.j2         |    21 +
 .../PIG/package/templates/pig.properties.j2     |    55 +
 .../HDP/2.0.6/services/SQOOP/metainfo.xml       |    46 +-
 .../services/SQOOP/package/scripts/__init__.py  |    19 +
 .../services/SQOOP/package/scripts/params.py    |    37 +
 .../SQOOP/package/scripts/service_check.py      |    37 +
 .../services/SQOOP/package/scripts/sqoop.py     |    52 +
 .../SQOOP/package/scripts/sqoop_client.py       |    41 +
 .../SQOOP/package/templates/sqoop-env.sh.j2     |    36 +
 .../services/WEBHCAT/configuration/global.xml   |    51 -
 .../HDP/2.0.6/services/WEBHCAT/metainfo.xml     |    58 +-
 .../WEBHCAT/package/files/templetonSmoke.sh     |    95 +
 .../WEBHCAT/package/scripts/__init__.py         |    21 +
 .../services/WEBHCAT/package/scripts/params.py  |    51 +
 .../WEBHCAT/package/scripts/service_check.py    |    45 +
 .../WEBHCAT/package/scripts/status_params.py    |    26 +
 .../services/WEBHCAT/package/scripts/webhcat.py |   120 +
 .../WEBHCAT/package/scripts/webhcat_server.py   |    54 +
 .../WEBHCAT/package/scripts/webhcat_service.py  |    41 +
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |    44 +
 .../YARN/configuration/container-executor.cfg   |    20 -
 .../services/YARN/configuration/global.xml      |    24 +
 .../YARN/configuration/mapred-queue-acls.xml    |    52 +
 .../services/YARN/configuration/mapred-site.xml |   386 +
 .../services/YARN/configuration/yarn-log4j.xml  |    97 +
 .../services/YARN/configuration/yarn-site.xml   |    11 +
 .../stacks/HDP/2.0.6/services/YARN/metainfo.xml |   168 +-
 .../stacks/HDP/2.0.6/services/YARN/metrics.json |    82 +-
 .../files/validateYarnComponentStatus.py        |   165 +
 .../services/YARN/package/scripts/__init__.py   |    21 +
 .../YARN/package/scripts/historyserver.py       |    60 +
 .../package/scripts/mapred_service_check.py     |    74 +
 .../YARN/package/scripts/mapreduce2_client.py   |    43 +
 .../YARN/package/scripts/nodemanager.py         |    61 +
 .../services/YARN/package/scripts/params.py     |    89 +
 .../YARN/package/scripts/resourcemanager.py     |   112 +
 .../services/YARN/package/scripts/service.py    |    65 +
 .../YARN/package/scripts/service_check.py       |    67 +
 .../YARN/package/scripts/status_params.py       |    34 +
 .../2.0.6/services/YARN/package/scripts/yarn.py |   119 +
 .../YARN/package/scripts/yarn_client.py         |    43 +
 .../package/templates/container-executor.cfg.j2 |    22 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../YARN/package/templates/mapreduce.conf.j2    |    17 +
 .../YARN/package/templates/yarn-env.sh.j2       |   119 +
 .../YARN/package/templates/yarn.conf.j2         |    17 +
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |    84 +
 .../HDP/2.0.6/services/ZOOKEEPER/metainfo.xml   |    56 +-
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 +
 .../ZOOKEEPER/package/files/zkService.sh        |    26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 +
 .../ZOOKEEPER/package/scripts/__init__.py       |    21 +
 .../ZOOKEEPER/package/scripts/params.py         |    77 +
 .../ZOOKEEPER/package/scripts/service_check.py  |    47 +
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   106 +
 .../package/scripts/zookeeper_client.py         |    43 +
 .../package/scripts/zookeeper_server.py         |    55 +
 .../package/scripts/zookeeper_service.py        |    43 +
 .../package/templates/configuration.xsl.j2      |    24 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    51 +
 .../package/templates/zookeeper-env.sh.j2       |    25 +
 .../templates/zookeeper_client_jaas.conf.j2     |     5 +
 .../package/templates/zookeeper_jaas.conf.j2    |     8 +
 .../before-INSTALL/files/changeToSecureUid.sh   |    50 -
 .../2.1.1/hooks/before-INSTALL/scripts/hook.py  |    36 -
 .../hooks/before-INSTALL/scripts/params.py      |    84 -
 .../scripts/shared_initialization.py            |   113 -
 .../hooks/before-START/files/checkForFormat.sh  |    62 -
 .../before-START/files/task-log4j.properties    |   132 -
 .../2.1.1/hooks/before-START/scripts/hook.py    |    37 -
 .../2.1.1/hooks/before-START/scripts/params.py  |   197 -
 .../scripts/shared_initialization.py            |   322 -
 .../templates/commons-logging.properties.j2     |    25 -
 .../templates/exclude_hosts_list.j2             |     3 -
 .../before-START/templates/hadoop-env.sh.j2     |   125 -
 .../templates/hadoop-metrics2.properties.j2     |    45 -
 .../hooks/before-START/templates/hdfs.conf.j2   |    17 -
 .../before-START/templates/health_check-v2.j2   |    91 -
 .../before-START/templates/health_check.j2      |   118 -
 .../templates/include_hosts_list.j2             |     3 -
 .../hooks/before-START/templates/slaves.j2      |     3 -
 .../hooks/before-START/templates/snmpd.conf.j2  |    48 -
 .../templates/taskcontroller.cfg.j2             |    20 -
 .../resources/stacks/HDP/2.1.1/metainfo.xml     |     1 +
 .../HDP/2.1.1/services/GANGLIA/metainfo.xml     |   108 -
 .../GANGLIA/package/files/checkGmetad.sh        |    37 -
 .../GANGLIA/package/files/checkGmond.sh         |    62 -
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 -
 .../services/GANGLIA/package/files/gmetad.init  |    73 -
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 -
 .../services/GANGLIA/package/files/gmond.init   |    73 -
 .../services/GANGLIA/package/files/gmondLib.sh  |   545 -
 .../2.1.1/services/GANGLIA/package/files/rrd.py |   213 -
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 -
 .../GANGLIA/package/files/setupGanglia.sh       |   141 -
 .../GANGLIA/package/files/startGmetad.sh        |    64 -
 .../GANGLIA/package/files/startGmond.sh         |    80 -
 .../GANGLIA/package/files/startRrdcached.sh     |    69 -
 .../GANGLIA/package/files/stopGmetad.sh         |    43 -
 .../services/GANGLIA/package/files/stopGmond.sh |    54 -
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 -
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 -
 .../services/GANGLIA/package/scripts/ganglia.py |    97 -
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   139 -
 .../package/scripts/ganglia_monitor_service.py  |    31 -
 .../GANGLIA/package/scripts/ganglia_server.py   |   197 -
 .../package/scripts/ganglia_server_service.py   |    27 -
 .../services/GANGLIA/package/scripts/params.py  |    80 -
 .../GANGLIA/package/scripts/status_params.py    |    25 -
 .../package/templates/gangliaClusters.conf.j2   |    35 -
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    24 -
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    62 -
 .../services/HBASE/configuration/global.xml     |   160 -
 .../HBASE/configuration/hbase-log4j.xml         |   183 -
 .../HBASE/configuration/hbase-policy.xml        |    53 -
 .../services/HBASE/configuration/hbase-site.xml |   359 -
 .../HDP/2.1.1/services/HBASE/metainfo.xml       |    94 -
 .../HDP/2.1.1/services/HBASE/metrics.json       | 13635 -----------------
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    32 -
 .../services/HBASE/package/scripts/__init__.py  |    19 -
 .../services/HBASE/package/scripts/functions.py |    40 -
 .../services/HBASE/package/scripts/hbase.py     |   107 -
 .../HBASE/package/scripts/hbase_client.py       |    52 -
 .../HBASE/package/scripts/hbase_decommission.py |    53 -
 .../HBASE/package/scripts/hbase_master.py       |    81 -
 .../HBASE/package/scripts/hbase_regionserver.py |    75 -
 .../HBASE/package/scripts/hbase_service.py      |    46 -
 .../services/HBASE/package/scripts/params.py    |    95 -
 .../HBASE/package/scripts/service_check.py      |    89 -
 .../HBASE/package/scripts/status_params.py      |    25 -
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    62 -
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    62 -
 .../HBASE/package/templates/hbase-env.sh.j2     |    82 -
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    26 -
 .../package/templates/hbase_client_jaas.conf.j2 |     5 -
 .../templates/hbase_grant_permissions.j2        |    21 -
 .../package/templates/hbase_master_jaas.conf.j2 |     8 -
 .../templates/hbase_regionserver_jaas.conf.j2   |     8 -
 .../HBASE/package/templates/regionservers.j2    |     2 -
 .../services/HDFS/configuration/core-site.xml   |   169 -
 .../services/HDFS/configuration/global.xml      |   187 -
 .../HDFS/configuration/hadoop-policy.xml        |   134 -
 .../services/HDFS/configuration/hdfs-log4j.xml  |   305 -
 .../services/HDFS/configuration/hdfs-site.xml   |   518 -
 .../stacks/HDP/2.1.1/services/HDFS/metainfo.xml |   153 -
 .../stacks/HDP/2.1.1/services/HDFS/metrics.json |  7800 ----------
 .../HDFS/package/files/checkForFormat.sh        |    62 -
 .../services/HDFS/package/files/checkWebUI.py   |    53 -
 .../services/HDFS/package/scripts/datanode.py   |    57 -
 .../HDFS/package/scripts/hdfs_client.py         |    52 -
 .../HDFS/package/scripts/hdfs_datanode.py       |    57 -
 .../HDFS/package/scripts/hdfs_namenode.py       |   212 -
 .../HDFS/package/scripts/hdfs_snamenode.py      |    53 -
 .../HDFS/package/scripts/journalnode.py         |    74 -
 .../services/HDFS/package/scripts/namenode.py   |    68 -
 .../services/HDFS/package/scripts/params.py     |   193 -
 .../HDFS/package/scripts/service_check.py       |   107 -
 .../services/HDFS/package/scripts/snamenode.py  |    64 -
 .../HDFS/package/scripts/status_params.py       |    31 -
 .../services/HDFS/package/scripts/utils.py      |   138 -
 .../services/HDFS/package/scripts/zkfc_slave.py |    62 -
 .../package/templates/exclude_hosts_list.j2     |     3 -
 .../HIVE/configuration/hive-exec-log4j.xml      |   122 -
 .../services/HIVE/configuration/hive-log4j.xml  |   130 -
 .../services/HIVE/configuration/hive-site.xml   |   285 -
 .../stacks/HDP/2.1.1/services/HIVE/metainfo.xml |   158 -
 .../services/HIVE/package/files/addMysqlUser.sh |    41 -
 .../services/HIVE/package/files/hcatSmoke.sh    |    35 -
 .../services/HIVE/package/files/hiveSmoke.sh    |    23 -
 .../services/HIVE/package/files/hiveserver2.sql |    23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |    31 -
 .../services/HIVE/package/files/pigSmoke.sh     |    18 -
 .../HIVE/package/files/startHiveserver2.sh      |    22 -
 .../HIVE/package/files/startMetastore.sh        |    22 -
 .../services/HIVE/package/scripts/__init__.py   |    19 -
 .../2.1.1/services/HIVE/package/scripts/hcat.py |    47 -
 .../HIVE/package/scripts/hcat_client.py         |    43 -
 .../HIVE/package/scripts/hcat_service_check.py  |    63 -
 .../2.1.1/services/HIVE/package/scripts/hive.py |   155 -
 .../HIVE/package/scripts/hive_client.py         |    41 -
 .../HIVE/package/scripts/hive_metastore.py      |    63 -
 .../HIVE/package/scripts/hive_server.py         |    63 -
 .../HIVE/package/scripts/hive_service.py        |    56 -
 .../HIVE/package/scripts/mysql_server.py        |    77 -
 .../HIVE/package/scripts/mysql_service.py       |    38 -
 .../services/HIVE/package/scripts/params.py     |   135 -
 .../HIVE/package/scripts/service_check.py       |    56 -
 .../HIVE/package/scripts/status_params.py       |    30 -
 .../HIVE/package/templates/hcat-env.sh.j2       |    25 -
 .../HIVE/package/templates/hive-env.sh.j2       |    55 -
 .../HDP/2.1.1/services/NAGIOS/metainfo.xml      |   113 -
 .../NAGIOS/package/files/check_aggregate.php    |   243 -
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 -
 .../package/files/check_datanode_storage.php    |   100 -
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   115 -
 .../package/files/check_hdfs_capacity.php       |   109 -
 .../files/check_hive_metastore_status.sh        |    45 -
 .../NAGIOS/package/files/check_hue_status.sh    |    31 -
 .../files/check_mapred_local_dir_used.sh        |    34 -
 .../package/files/check_name_dir_status.php     |    93 -
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    82 -
 .../package/files/check_nodemanager_health.sh   |    44 -
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 -
 .../package/files/check_templeton_status.sh     |    45 -
 .../NAGIOS/package/files/check_webui.sh         |    87 -
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 -
 .../NAGIOS/package/scripts/functions.py         |    31 -
 .../services/NAGIOS/package/scripts/nagios.py   |    97 -
 .../NAGIOS/package/scripts/nagios_server.py     |    87 -
 .../package/scripts/nagios_server_config.py     |    91 -
 .../NAGIOS/package/scripts/nagios_service.py    |    36 -
 .../services/NAGIOS/package/scripts/params.py   |   162 -
 .../NAGIOS/package/scripts/status_params.py     |    26 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |    91 -
 .../package/templates/hadoop-commands.cfg.j2    |   114 -
 .../package/templates/hadoop-hostgroups.cfg.j2  |    15 -
 .../package/templates/hadoop-hosts.cfg.j2       |    16 -
 .../templates/hadoop-servicegroups.cfg.j2       |    80 -
 .../package/templates/hadoop-services.cfg.j2    |   643 -
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1349 --
 .../NAGIOS/package/templates/nagios.conf.j2     |    62 -
 .../services/NAGIOS/package/templates/nagios.j2 |   146 -
 .../NAGIOS/package/templates/resource.cfg.j2    |    33 -
 .../OOZIE/configuration/oozie-log4j.xml         |   183 -
 .../services/OOZIE/configuration/oozie-site.xml |   329 -
 .../HDP/2.1.1/services/OOZIE/metainfo.xml       |    85 -
 .../services/OOZIE/package/files/oozieSmoke2.sh |    95 -
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 -
 .../services/OOZIE/package/scripts/oozie.py     |   134 -
 .../OOZIE/package/scripts/oozie_client.py       |    33 -
 .../OOZIE/package/scripts/oozie_server.py       |    47 -
 .../OOZIE/package/scripts/oozie_service.py      |    65 -
 .../services/OOZIE/package/scripts/params.py    |    83 -
 .../OOZIE/package/scripts/service_check.py      |    67 -
 .../OOZIE/package/scripts/status_params.py      |    26 -
 .../OOZIE/package/templates/oozie-env.sh.j2     |    64 -
 .../package/templates/oozie-log4j.properties.j2 |    74 -
 .../services/PIG/configuration/pig-log4j.xml    |    40 -
 .../services/PIG/configuration/pig.properties   |    52 -
 .../stacks/HDP/2.1.1/services/PIG/metainfo.xml  |    61 -
 .../services/PIG/package/files/pigSmoke.sh      |    18 -
 .../services/PIG/package/scripts/params.py      |    42 -
 .../2.1.1/services/PIG/package/scripts/pig.py   |    61 -
 .../services/PIG/package/scripts/pig_client.py  |    52 -
 .../PIG/package/scripts/service_check.py        |    75 -
 .../PIG/package/templates/pig-env.sh.j2         |    21 -
 .../PIG/package/templates/pig.properties.j2     |    55 -
 .../HDP/2.1.1/services/SQOOP/metainfo.xml       |    60 -
 .../services/SQOOP/package/scripts/__init__.py  |    19 -
 .../services/SQOOP/package/scripts/params.py    |    37 -
 .../SQOOP/package/scripts/service_check.py      |    37 -
 .../services/SQOOP/package/scripts/sqoop.py     |    52 -
 .../SQOOP/package/scripts/sqoop_client.py       |    41 -
 .../SQOOP/package/templates/sqoop-env.sh.j2     |    36 -
 .../WEBHCAT/configuration/webhcat-site.xml      |   126 -
 .../HDP/2.1.1/services/WEBHCAT/metainfo.xml     |    65 -
 .../WEBHCAT/package/files/templetonSmoke.sh     |    95 -
 .../WEBHCAT/package/scripts/__init__.py         |    21 -
 .../services/WEBHCAT/package/scripts/params.py  |    51 -
 .../WEBHCAT/package/scripts/service_check.py    |    45 -
 .../WEBHCAT/package/scripts/status_params.py    |    26 -
 .../services/WEBHCAT/package/scripts/webhcat.py |   120 -
 .../WEBHCAT/package/scripts/webhcat_server.py   |    54 -
 .../WEBHCAT/package/scripts/webhcat_service.py  |    41 -
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |    44 -
 .../YARN/configuration/capacity-scheduler.xml   |   131 -
 .../services/YARN/configuration/core-site.xml   |    20 -
 .../services/YARN/configuration/global.xml      |    88 -
 .../YARN/configuration/mapred-queue-acls.xml    |    52 -
 .../services/YARN/configuration/mapred-site.xml |   386 -
 .../services/YARN/configuration/yarn-log4j.xml  |    97 -
 .../services/YARN/configuration/yarn-site.xml   |   342 -
 .../stacks/HDP/2.1.1/services/YARN/metainfo.xml |   174 -
 .../stacks/HDP/2.1.1/services/YARN/metrics.json |  2534 ---
 .../files/validateYarnComponentStatus.py        |   165 -
 .../services/YARN/package/scripts/__init__.py   |    21 -
 .../YARN/package/scripts/historyserver.py       |    60 -
 .../package/scripts/mapred_service_check.py     |    74 -
 .../YARN/package/scripts/mapreduce2_client.py   |    43 -
 .../YARN/package/scripts/nodemanager.py         |    61 -
 .../services/YARN/package/scripts/params.py     |    89 -
 .../YARN/package/scripts/resourcemanager.py     |   112 -
 .../services/YARN/package/scripts/service.py    |    65 -
 .../YARN/package/scripts/service_check.py       |    67 -
 .../YARN/package/scripts/status_params.py       |    34 -
 .../2.1.1/services/YARN/package/scripts/yarn.py |   119 -
 .../YARN/package/scripts/yarn_client.py         |    43 -
 .../package/templates/container-executor.cfg.j2 |    22 -
 .../package/templates/exclude_hosts_list.j2     |     3 -
 .../YARN/package/templates/mapreduce.conf.j2    |    17 -
 .../YARN/package/templates/yarn-env.sh.j2       |   119 -
 .../YARN/package/templates/yarn.conf.j2         |    17 -
 .../services/ZOOKEEPER/configuration/global.xml |    75 -
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |    84 -
 .../HDP/2.1.1/services/ZOOKEEPER/metainfo.xml   |    71 -
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 -
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 -
 .../ZOOKEEPER/package/files/zkService.sh        |    26 -
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 -
 .../ZOOKEEPER/package/scripts/__init__.py       |    21 -
 .../ZOOKEEPER/package/scripts/params.py         |    77 -
 .../ZOOKEEPER/package/scripts/service_check.py  |    47 -
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 -
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   106 -
 .../package/scripts/zookeeper_client.py         |    43 -
 .../package/scripts/zookeeper_server.py         |    55 -
 .../package/scripts/zookeeper_service.py        |    43 -
 .../package/templates/configuration.xsl.j2      |    24 -
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    51 -
 .../package/templates/zookeeper-env.sh.j2       |    25 -
 .../templates/zookeeper_client_jaas.conf.j2     |     5 -
 .../package/templates/zookeeper_jaas.conf.j2    |     8 -
 .../1.3.2/GANGLIA/test_ganglia_monitor.py       |   195 +
 .../stacks/1.3.2/GANGLIA/test_ganglia_server.py |   226 +
 .../stacks/1.3.2/HBASE/test_hbase_client.py     |   115 +
 .../stacks/1.3.2/HBASE/test_hbase_master.py     |   224 +
 .../1.3.2/HBASE/test_hbase_regionserver.py      |   196 +
 .../stacks/1.3.2/HIVE/test_hcat_client.py       |    67 +
 .../stacks/1.3.2/HIVE/test_hive_client.py       |    97 +
 .../stacks/1.3.2/HIVE/test_hive_metastore.py    |   214 +
 .../stacks/1.3.2/HIVE/test_hive_server.py       |   215 +
 .../1.3.2/HIVE/test_hive_service_check.py       |   117 +
 .../stacks/1.3.2/HIVE/test_mysql_server.py      |   141 +
 .../1.3.2/MAPREDUCE/test_mapreduce_client.py    |    87 +
 .../MAPREDUCE/test_mapreduce_historyserver.py   |   157 +
 .../MAPREDUCE/test_mapreduce_jobtracker.py      |   196 +
 .../MAPREDUCE/test_mapreduce_service_check.py   |    78 +
 .../MAPREDUCE/test_mapreduce_tasktracker.py     |   157 +
 .../stacks/1.3.2/SQOOP/test_service_check.py    |    48 +
 .../python/stacks/1.3.2/SQOOP/test_sqoop.py     |    52 +
 .../stacks/1.3.2/WEBHCAT/test_webhcat_server.py |   188 +
 .../1.3.2/WEBHCAT/test_webhcat_service_check.py |    61 +
 .../1.3.2/ZOOKEEPER/test_zookeeper_client.py    |   124 +
 .../1.3.2/ZOOKEEPER/test_zookeeper_server.py    |   200 +
 .../ZOOKEEPER/test_zookeeper_service_check.py   |    61 +
 .../python/stacks/1.3.2/configs/default.json    |   444 +
 .../python/stacks/1.3.2/configs/secured.json    |   549 +
 .../hooks/before-INSTALL/test_before_install.py |    78 +
 .../hooks/before-START/test_before_start.py     |   150 +
 .../1.3.3/GANGLIA/test_ganglia_monitor.py       |   195 -
 .../stacks/1.3.3/GANGLIA/test_ganglia_server.py |   226 -
 .../stacks/1.3.3/HBASE/test_hbase_client.py     |   115 -
 .../stacks/1.3.3/HBASE/test_hbase_master.py     |   224 -
 .../1.3.3/HBASE/test_hbase_regionserver.py      |   196 -
 .../stacks/1.3.3/HIVE/test_hcat_client.py       |    67 -
 .../stacks/1.3.3/HIVE/test_hive_client.py       |    97 -
 .../stacks/1.3.3/HIVE/test_hive_metastore.py    |   214 -
 .../stacks/1.3.3/HIVE/test_hive_server.py       |   215 -
 .../1.3.3/HIVE/test_hive_service_check.py       |   117 -
 .../stacks/1.3.3/HIVE/test_mysql_server.py      |   141 -
 .../1.3.3/MAPREDUCE/test_mapreduce_client.py    |    87 -
 .../MAPREDUCE/test_mapreduce_historyserver.py   |   157 -
 .../MAPREDUCE/test_mapreduce_jobtracker.py      |   196 -
 .../MAPREDUCE/test_mapreduce_service_check.py   |    78 -
 .../MAPREDUCE/test_mapreduce_tasktracker.py     |   157 -
 .../stacks/1.3.3/SQOOP/test_service_check.py    |    48 -
 .../python/stacks/1.3.3/SQOOP/test_sqoop.py     |    52 -
 .../stacks/1.3.3/WEBHCAT/test_webhcat_server.py |   188 -
 .../1.3.3/WEBHCAT/test_webhcat_service_check.py |    61 -
 .../1.3.3/ZOOKEEPER/test_zookeeper_client.py    |   124 -
 .../1.3.3/ZOOKEEPER/test_zookeeper_server.py    |   200 -
 .../ZOOKEEPER/test_zookeeper_service_check.py   |    61 -
 .../python/stacks/1.3.3/configs/default.json    |   444 -
 .../python/stacks/1.3.3/configs/secured.json    |   549 -
 .../hooks/before-INSTALL/test_before_install.py |    78 -
 .../hooks/before-START/test_before_start.py     |   150 -
 .../2.0.6/GANGLIA/test_ganglia_monitor.py       |   169 +
 .../stacks/2.0.6/GANGLIA/test_ganglia_server.py |   214 +
 .../stacks/2.0.6/HBASE/test_hbase_client.py     |   115 +
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |   224 +
 .../2.0.6/HBASE/test_hbase_regionserver.py      |   195 +
 .../stacks/2.0.6/HIVE/test_hcat_client.py       |    67 +
 .../stacks/2.0.6/HIVE/test_hive_client.py       |    99 +
 .../stacks/2.0.6/HIVE/test_hive_metastore.py    |   213 +
 .../stacks/2.0.6/HIVE/test_hive_server.py       |   214 +
 .../2.0.6/HIVE/test_hive_service_check.py       |   117 +
 .../stacks/2.0.6/HIVE/test_mysql_server.py      |   142 +
 .../stacks/2.0.6/SQOOP/test_service_check.py    |    50 +
 .../python/stacks/2.0.6/SQOOP/test_sqoop.py     |    52 +
 .../stacks/2.0.6/WEBHCAT/test_webhcat_server.py |   191 +
 .../2.0.6/WEBHCAT/test_webhcat_service_check.py |    61 +
 .../stacks/2.0.6/YARN/test_historyserver.py     |   289 +
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |   215 +
 .../2.0.6/YARN/test_mapreduce2_service_check.py |    90 +
 .../stacks/2.0.6/YARN/test_nodemanager.py       |   290 +
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |   288 +
 .../stacks/2.0.6/YARN/test_yarn_client.py       |   215 +
 .../2.0.6/YARN/test_yarn_service_check.py       |    68 +
 .../2.0.6/ZOOKEEPER/test_zookeeper_client.py    |   125 +
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    |   197 +
 .../ZOOKEEPER/test_zookeeper_service_check.py   |    61 +
 .../python/stacks/2.0.6/configs/default.json    |   611 +
 .../python/stacks/2.0.6/configs/secured.json    |   738 +
 .../hooks/before-INSTALL/test_before_install.py |    87 +
 .../hooks/before-START/test_before_start.py     |   157 +
 .../2.1.1/GANGLIA/test_ganglia_monitor.py       |   169 -
 .../stacks/2.1.1/GANGLIA/test_ganglia_server.py |   214 -
 .../stacks/2.1.1/HBASE/test_hbase_client.py     |   115 -
 .../stacks/2.1.1/HBASE/test_hbase_master.py     |   224 -
 .../2.1.1/HBASE/test_hbase_regionserver.py      |   195 -
 .../stacks/2.1.1/HIVE/test_hcat_client.py       |    67 -
 .../stacks/2.1.1/HIVE/test_hive_client.py       |    99 -
 .../stacks/2.1.1/HIVE/test_hive_metastore.py    |   213 -
 .../stacks/2.1.1/HIVE/test_hive_server.py       |   214 -
 .../2.1.1/HIVE/test_hive_service_check.py       |   117 -
 .../stacks/2.1.1/HIVE/test_mysql_server.py      |   142 -
 .../stacks/2.1.1/SQOOP/test_service_check.py    |    50 -
 .../python/stacks/2.1.1/SQOOP/test_sqoop.py     |    52 -
 .../stacks/2.1.1/WEBHCAT/test_webhcat_server.py |   191 -
 .../2.1.1/WEBHCAT/test_webhcat_service_check.py |    61 -
 .../stacks/2.1.1/YARN/test_historyserver.py     |   289 -
 .../stacks/2.1.1/YARN/test_mapreduce2_client.py |   215 -
 .../2.1.1/YARN/test_mapreduce2_service_check.py |    90 -
 .../stacks/2.1.1/YARN/test_nodemanager.py       |   290 -
 .../stacks/2.1.1/YARN/test_resourcemanager.py   |   288 -
 .../stacks/2.1.1/YARN/test_yarn_client.py       |   215 -
 .../2.1.1/YARN/test_yarn_service_check.py       |    68 -
 .../2.1.1/ZOOKEEPER/test_zookeeper_client.py    |   125 -
 .../2.1.1/ZOOKEEPER/test_zookeeper_server.py    |   197 -
 .../ZOOKEEPER/test_zookeeper_service_check.py   |    61 -
 .../hooks/before-INSTALL/test_before_install.py |    87 -
 .../hooks/before-START/test_before_start.py     |   157 -
 1075 files changed, 45535 insertions(+), 77422 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/files/changeToSecureUid.sh
new file mode 100644
index 0000000..4872a10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/files/changeToSecureUid.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+
+function find_available_uid() {
+ newUid=0
+ for ((i=1001; i<=2000; i++))
+ do
+   if ! cut -d: -f3 /etc/passwd | grep -q "^$i$"
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ $newUid -eq 0 ]
+then
+  echo "Failed to find Uid between 1000 and 2000"
+  exit 1
+fi
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
+exit 0

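For context, the script above scans /etc/passwd for the first unused uid in
1001-2000 and reassigns the user to it. A minimal Python sketch of the same
scan (names here are illustrative, not part of the stack scripts):

    # Sketch of changeToSecureUid.sh's free-UID scan, assuming the standard
    # /etc/passwd layout (name:passwd:uid:gid:...).
    def find_available_uid(passwd_path="/etc/passwd", low=1001, high=2000):
        with open(passwd_path) as f:
            used = set(int(line.split(":")[2])
                       for line in f if line.count(":") >= 2)
        for uid in range(low, high + 1):
            if uid not in used:
                return uid
        return 0  # mirrors the script: 0 means no free uid was found

    if __name__ == "__main__":
        print(find_available_uid())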
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..51e5cd2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+#TODO this must become the "CONFIGURE" hook once the CONFIGURE command is implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_users()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()

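The hook above follows the resource_management Script/Hook pattern:
execute() builds an environment, and hook(env) imports the params module and
registers it via env.set_params() before declaring resources. A stripped-down
illustration of that dispatch (hypothetical stand-ins, not the real
resource_management classes):

    # Hypothetical stand-ins showing the Hook dispatch pattern; the real
    # Environment/Hook classes live in resource_management.
    import types

    class Environment(object):
        def set_params(self, module):
            # expose the params module's public names to resources/templates
            self.params = dict((k, v) for k, v in vars(module).items()
                               if not k.startswith("_"))

    class Hook(object):
        def execute(self):
            env = Environment()
            self.hook(env)  # subclass declares Group/User/Package resources
            return env

    # stand-in for the params module the real hook imports
    fake_params = types.ModuleType("params")
    fake_params.user_group = "hadoop"

    class BeforeConfigureHook(Hook):
        def hook(self, env):
            env.set_params(fake_params)  # the real hook does: import params

    assert BeforeConfigureHook().execute().params["user_group"] == "hadoop"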
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000..fa19ca3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,81 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+zk_user = config['configurations']['global']['zk_user']
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group =  "users"
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+
+has_resourcemanager = len(rm_host) > 0
+has_slaves = len(slave_hosts) > 0
+has_nagios = len(nagios_server_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_hcat_server_host = len(hcat_server_hosts) > 0
+has_hive_server_host = len(hive_server_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_zk_host = len(zk_hosts) > 0
+has_ganglia_server = len(ganglia_server_hosts) > 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file

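params.py leans on resource_management's default() helper for optional keys:
it walks a '/'-separated path through the config dict and returns the
fallback when any segment is missing. A minimal sketch of that lookup (the
real helper lives in resource_management and reads Script.get_config()
itself; here the config dict is passed in explicitly):

    # Minimal sketch of default("/path/to/key", fallback), assuming config
    # is a plain nested dict like the one Script.get_config() returns.
    def default(path, fallback, config):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {"clusterHostInfo": {"rm_host": ["c6401.ambari.apache.org"]}}
    assert default("/clusterHostInfo/rm_host", [], config) == \
        ["c6401.ambari.apache.org"]
    assert default("/clusterHostInfo/nagios_server_host", [], config) == []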
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..26a7592
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,107 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  Group(params.user_group)
+  Group(params.smoke_user_group)
+  Group(params.proxyuser_group)
+  User(params.smoke_user,
+       gid=params.user_group,
+       groups=[params.proxyuser_group]
+  )
+  smoke_user_dirs = format(
+    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+  set_uid(params.smoke_user, smoke_user_dirs)
+
+  if params.has_hbase_masters:
+    User(params.hbase_user,
+         gid = params.user_group,
+         groups=[params.user_group])
+    hbase_user_dirs = format(
+      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+    set_uid(params.hbase_user, hbase_user_dirs)
+
+  if params.has_nagios:
+    Group(params.nagios_group)
+    User(params.nagios_user,
+         gid=params.nagios_group)
+
+  if params.has_oozie_server:
+    User(params.oozie_user,
+         gid = params.user_group)
+
+  if params.has_hcat_server_host:
+    User(params.webhcat_user,
+         gid = params.user_group)
+    User(params.hcat_user,
+         gid = params.user_group)
+
+  if params.has_hive_server_host:
+    User(params.hive_user,
+         gid = params.user_group)
+
+  if params.has_resourcemanager:
+    User(params.yarn_user,
+         gid = params.user_group)
+
+  if params.has_ganglia_server:
+    Group(params.gmetad_user)
+    Group(params.gmond_user)
+    User(params.gmond_user,
+         gid=params.user_group,
+         groups=[params.gmond_user])
+    User(params.gmetad_user,
+         gid=params.user_group,
+         groups=[params.gmetad_user])
+
+  User(params.hdfs_user,
+        gid=params.user_group,
+        groups=[params.user_group]
+  )
+  User(params.mapred_user,
+       gid=params.user_group,
+       groups=[params.user_group]
+  )
+  if params.has_zk_host:
+    User(params.zk_user,
+         gid=params.user_group)
+
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  File("/tmp/changeUid.sh",
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
+          not_if = format("test $(id -u {user}) -gt 1000"))
+
+def install_packages():
+  Package("unzip")
+  Package("net-snmp")
+  Package("net-snmp-utils")
\ No newline at end of file

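set_uid() relies on Execute's not_if guard for idempotency: the uid-change
command is skipped whenever the guard command exits 0, i.e. when the user
already has a uid above 1000. A sketch of that semantics with a hypothetical
stand-in for Execute:

    # Hypothetical stand-in for resource_management's Execute not_if guard:
    # run the command only when the guard command exits non-zero.
    import subprocess

    def execute(command, not_if=None):
        if not_if is not None and subprocess.call(not_if, shell=True) == 0:
            return  # guard passed -> nothing to do
        subprocess.check_call(command, shell=True)

    # root's uid is 0, so the guard fails and the command runs
    execute("echo 'would change uid here'",
            not_if="test $(id -u root) -gt 1000")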
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
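
For context, the namenode code later in this mail drives checkForFormat.sh
through the resource_management DSL; the sketch below mirrors those hunks
(the not_if guard keeps formatting idempotent across restarts):

  File('/tmp/checkForFormat.sh',
       content=StaticFile("checkForFormat.sh"),
       mode=0755)
  # Arguments: hdfs user, hadoop conf dir, marker dir, then the name dirs.
  Execute(format("sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
                 "{mark_dir} {dfs_name_dir}"),
          not_if=format("test -d {mark_dir}"),  # skip if already formatted
          path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
  Execute(format("mkdir -p {mark_dir}"))  # record that the format ran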

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..c8939fc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,132 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use the following logger to send the job summary to a separate file,
+# defined by hadoop.mapreduce.jobsummary.log.file and rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
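
The hadoop.tasklog.* entries above are placeholder defaults: the real task id
and log size are expected to be overridden per task JVM via -D system
properties, which the TaskLogAppender then reads. As a sketch of how a file
under files/ such as this one is laid down by a hook (target path, owner and
mode here are assumptions, not values from this diff):

  File("/etc/hadoop/conf/task-log4j.properties",  # assumed target path
       content=StaticFile("task-log4j.properties"),
       owner=params.hdfs_user,   # params as in the hook scripts above
       group=params.user_group,
       mode=0644)                # assumed mode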

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..e11bfac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/hook.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+#TODO this should become the "CONFIGURE" hook once the CONFIGURE command is implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_java()
+    setup_hadoop()
+    setup_configs()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()


[15/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()
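
For reference, the HDFS service check later in this mail (service_check.py)
is what invokes this script; the sketch below mirrors that call with
illustrative journalnode hosts and port:

  File("/tmp/checkWebUI.py",
       content=StaticFile("checkWebUI.py"))
  # jn1/jn2 and 8480 are illustrative; real values come from params.
  Execute("su - ambari-qa -c 'python /tmp/checkWebUI.py"
          " -m jn1.example.com,jn2.example.com -p 8480'",
          logoutput=True,
          try_sleep=3,
          tries=5)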

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index 57fdb35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index ec24c7d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def config(self, env):
-    import params
-
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index f7d9f15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-def datanode(action=None):
-  import params
-
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0750,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    for data_dir in params.dfs_data_dir.split(","):
-      Directory(data_dir,
-                recursive=True,
-                mode=0755,
-                owner=params.hdfs_user,
-                group=params.user_group)
-
-  if action == "start":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )
-  if action == "stop":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index 8b29cc3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-import urlparse
-
-
-def namenode(action=None, format=True):
-  import params
-  #we need this directory to be present before any action(HA manual steps for
-  #additional namenode)
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if format:
-      format_namenode()
-      pass
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-    service(
-      action="start", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      create_pid_dir=True,
-      create_log_dir=True,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-    # TODO: extract creating of dirs to different services
-    create_app_directories()
-    create_user_directories()
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-
-def create_app_directories():
-  import params
-
-  hdfs_directory(name="/tmp",
-                 owner=params.hdfs_user,
-                 mode="777"
-  )
-  #mapred directories
-  if params.has_histroryserver:
-    hdfs_directory(name="/mapred",
-                   owner=params.mapred_user
-    )
-    hdfs_directory(name="/mapred/system",
-                   owner=params.hdfs_user
-    )
-    #hbase directories
-  if len(params.hbase_master_hosts) != 0:
-    hdfs_directory(name=params.hbase_hdfs_root_dir,
-                   owner=params.hbase_user
-    )
-    hdfs_directory(name=params.hbase_staging_dir,
-                   owner=params.hbase_user,
-                   mode="711"
-    )
-    #hive directories
-  if len(params.hive_server_host) != 0:
-    hdfs_directory(name=params.hive_apps_whs_dir,
-                   owner=params.hive_user,
-                   mode="777"
-    )
-  if len(params.hcat_server_hosts) != 0:
-    hdfs_directory(name=params.webhcat_apps_dir,
-                   owner=params.webhcat_user,
-                   mode="755"
-    )
-  if len(params.hs_host) != 0:
-    if params.yarn_log_aggregation_enabled:
-      hdfs_directory(name=params.yarn_nm_app_log_dir,
-                     owner=params.yarn_user,
-                     group=params.user_group,
-                     mode="777",
-                     recursive_chmod=True
-      )
-    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="1777"
-    )
-
-  if params.has_falcon_host:
-    if params.falcon_store_uri[0:4] == "hdfs":
-      hdfs_directory(name=params.store_uri,
-                     owner=params.falcon_user,
-                     mode="755"
-      )
-
-def create_user_directories():
-  import params
-
-  hdfs_directory(name=params.smoke_hdfs_user_dir,
-                 owner=params.smoke_user,
-                 mode=params.smoke_hdfs_user_mode
-  )
-
-  if params.has_hive_server_host:
-    hdfs_directory(name=params.hive_hdfs_user_dir,
-                   owner=params.hive_user,
-                   mode=params.hive_hdfs_user_mode
-    )
-
-  if params.has_hcat_server_host:
-    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-      hdfs_directory(name=params.hcat_hdfs_user_dir,
-                     owner=params.hcat_user,
-                     mode=params.hcat_hdfs_user_mode
-      )
-    hdfs_directory(name=params.webhcat_hdfs_user_dir,
-                   owner=params.webhcat_user,
-                   mode=params.webhcat_hdfs_user_mode
-    )
-
-  if params.has_oozie_server:
-    hdfs_directory(name=params.oozie_hdfs_user_dir,
-                   owner=params.oozie_user,
-                   mode=params.oozie_hdfs_user_mode
-    )
-
-
-def format_namenode(force=None):
-  import params
-
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True)
-    else:
-      File('/tmp/checkForFormat.sh',
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
-        "{dfs_name_dir}"),
-              not_if=format("test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
-    Execute(format("mkdir -p {mark_dir}"))
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-  user_group = params.user_group
-
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=user_group
-  )
-
-  ExecuteHadoop('dfsadmin -refreshNodes',
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index a943455..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-  elif action == "start":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )
-  elif action == "stop":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index f2134d5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def configure(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              recursive=True,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-
-if __name__ == "__main__":
-  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index deb01d5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_namenode import namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    #TODO remove when config action will be implemented
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    namenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="configure")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-
-if __name__ == "__main__":
-  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 99fdc70..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,193 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-config = Script.get_config()
-
-if System.get_instance().os_type == "oraclelinux":
-  ulimit_cmd = ''
-else:
-  ulimit_cmd = "ulimit -c unlimited; "
-
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-falcon_user = config['configurations']['global']['falcon_user']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_histroryserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-has_falcon_host = not len(falcon_host)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = status_params.hdfs_user
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group = "users"
-
-#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
-
-# if stack_version[0] == "2":
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-# else:
-#   dfs_name_dir = default("/configurations/hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
-hbase_staging_dir = "/apps/hbase/staging"
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
-webhcat_apps_dir = "/apps/webhcat"
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']#","true")
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']#","/app-logs")
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
-
-if has_oozie_server:
-  oozie_hdfs_user_dir = format("/user/{oozie_user}")
-  oozie_hdfs_user_mode = 775
-if has_hcat_server_host:
-  hcat_hdfs_user_dir = format("/user/{hcat_user}")
-  hcat_hdfs_user_mode = 755
-  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-  webhcat_hdfs_user_mode = 755
-if has_hive_server_host:
-  hive_hdfs_user_dir = format("/user/{hive_user}")
-  hive_hdfs_user_mode = 700
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 770
-
-namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-
-# if stack_version[0] == "2":
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
-# else:
-#   fs_checkpoint_dir = default("/configurations/core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-
-# if stack_version[0] == "2":
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
-# else:
-#   dfs_data_dir = default('/configurations/hdfs-site/dfs.data.dir',"/tmp/hadoop-hdfs/dfs/data")
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-  namenode_id = None
-
-journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-
-falcon_store_uri = config['configurations']['global']['falcon_store_uri']
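
Two resource_management helpers do the heavy lifting in params.py: a plain
config[...] lookup fails hard when a property is missing from the command
JSON, while default() returns a fallback instead. Both patterns appear above;
a minimal contrast:

  # Fails if 'hdfs-site' or the property is absent from the command JSON:
  dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
  # Returns [] when the cluster topology lists no resourcemanager hosts:
  rm_host = default("/clusterHostInfo/rm_host", [])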

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index d27b13a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    #cleanup put below to handle retries; if retrying there wil be a stale file
-    #that needs cleanup; exit code is fn of second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName))
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = format(
-          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 8f682ec..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env)
-
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.config(env)
-    snamenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 4097373..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index 017a47a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def service(action=None, name=None, user=None, create_pid_dir=False,
-            create_log_dir=False, keytab=None, principal=None):
-  import params
-
-  kinit_cmd = "true"
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  hadoop_daemon = format(
-    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  if params.security_enabled:
-    principal_replaced = principal.replace("_HOST", params.hostname)
-    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
-
-    if name == "datanode":
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-  daemon_cmd = format("{cmd} {action} {name}")
-
-  service_is_up = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
-
-  Execute(kinit_cmd)
-  Execute(daemon_cmd,
-          user = user,
-          not_if=service_is_up
-  )
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-         ignore_failures=True
-    )
-
-
-def hdfs_directory(name=None, owner=None, group=None,
-                   mode=None, recursive_chown=False, recursive_chmod=False):
-  import params
-
-  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
-  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
-  stub_dir = params.namenode_dirs_created_stub_dir
-  stub_filename = params.namenode_dirs_stub_filename
-  dir_absent_in_stub = format(
-    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
-  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
-  tries = 30
-  try_sleep = 10
-  dfs_check_nn_status_cmd = "true"
-
-  if params.dfs_ha_enabled:
-    namenode_id = params.namenode_id
-    dfs_check_nn_status_cmd = format(
-      "hdfs haadmin -getServiceState $namenode_id | grep active > /dev/null")
-
-  #if params.stack_version[0] == "2":
-  mkdir_cmd = format("fs -mkdir -p {name}")
-  #else:
-  #  mkdir_cmd = format("fs -mkdir {name}")
-
-  if params.security_enabled:
-    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
-            user = params.hdfs_user)
-  ExecuteHadoop(mkdir_cmd,
-                try_sleep=try_sleep,
-                tries=tries,
-                not_if=format(
-                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "{dir_exists} && ! {namenode_safe_mode_off}"),
-                only_if=format(
-                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "! {dir_exists}"),
-                conf_dir=params.hadoop_conf_dir,
-                user=params.hdfs_user
-  )
-  Execute(record_dir_in_stub,
-          user=params.hdfs_user,
-          only_if=format("{dir_absent_in_stub}")
-  )
-
-  recursive = "-R" if recursive_chown else ""
-  perm_cmds = []
-
-  if owner:
-    chown = owner
-    if group:
-      chown = format("{owner}:{group}")
-    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
-  if mode:
-    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
-  for cmd in perm_cmds:
-    ExecuteHadoop(cmd,
-                  user=params.hdfs_user,
-                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
-                  try_sleep=try_sleep,
-                  tries=tries,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-
-

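A note on the two helpers removed above: service() wraps hadoop-daemon.sh in a pid-file guard, and hdfs_directory() makes HDFS directory creation idempotent via a stub file. Below is a standalone Python sketch of the command strings service() assembles for an unsecured cluster; the user, component name, and paths are illustrative, not taken from a real params.py:

# Rough approximation of what utils.service() builds; runnable on its own.
user, name = "hdfs", "namenode"
hadoop_bin = "/usr/lib/hadoop/bin"
hadoop_conf_dir = "/etc/hadoop/conf"
pid_file = "/var/run/hadoop/%s/hadoop-%s-%s.pid" % (user, user, name)
daemon_cmd = "%s/hadoop-daemon.sh --config %s start %s" % (
    hadoop_bin, hadoop_conf_dir, name)
# Guard used as not_if, so a second start is a no-op:
service_is_up = ("ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1"
                 % (pid_file, pid_file))
print(daemon_cmd)
print(service_is_up)
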
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index 1f9ba65..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def config(self, env):
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

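The status() method above delegates to check_process_status from resource_management. As a rough illustration only (not the library code, and with an assumed pid-file path), the same pid-file liveness test can be expressed in plain Python:

import os

def is_process_up(pid_file):
    # Mirrors the shell guard "ls <pid_file> && ps `cat <pid_file>`"
    # used elsewhere in these scripts.
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)  # signal 0 checks existence without killing
        return True
    except (IOError, ValueError, OSError):
        return False

print(is_process_up("/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid"))
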
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index c3af46e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file

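The template above emits one line per entry of hdfs_exclude_file. A minimal rendering sketch with plain jinja2, using made-up host names:

from jinja2 import Template

TEMPLATE = "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}"
print(Template(TEMPLATE).render(
    hdfs_exclude_file=["host1.example.com", "host2.example.com"]))
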
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
deleted file mode 100644
index b0f5268..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hive.log.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.root.logger</name>
-    <value>INFO,FA</value>
-  </property>
-  <property>
-    <name>hive.log.dir</name>
-    <value>/tmp/${user.name}</value>
-  </property>
-  <property>
-    <name>hive.log.file</name>
-    <value>${hive.query.id}.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hive.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshold</name>
-    <value>${hive.log.threshold}</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA</name>
-    <value>org.apache.log4j.FileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.File</name>
-    <value>${hive.log.dir}/${hive.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.FA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
-  </property>
-  <property>
-    <name>log4j.category.DataNucleus</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore.Schema</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Datastore</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Plugin</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.MetaData</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Query</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.General</name>
-    <value>ERROR,FA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Enhancer</name>
-    <value>ERROR,FA</value>
-  </property>
-
-</configuration>

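Each property name/value pair above corresponds to one key=value line of a log4j .properties file, which is how the stack ultimately materializes this configuration. A rough standalone conversion sketch (the input file name is assumed):

import xml.etree.ElementTree as ET

def xml_to_properties(xml_path):
    # <property><name>k</name><value>v</value></property>  ->  "k=v"
    root = ET.parse(xml_path).getroot()
    return "\n".join(
        "%s=%s" % (p.find("name").text, p.find("value").text)
        for p in root.findall("property"))

print(xml_to_properties("hive-exec-log4j.xml"))
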
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
deleted file mode 100644
index e92c3e8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hive.log.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.root.logger</name>
-    <value>WARN,DRFA</value>
-  </property>
-  <property>
-    <name>hive.log.dir</name>
-    <value>/tmp/${user.name}</value>
-  </property>
-  <property>
-    <name>hive.log.file</name>
-    <value>hive.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hive.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshold</name>
-    <value>${hive.log.threshold}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hive.log.dir}/${hive.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.encoding</name>
-    <value>UTF-8</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
-  </property>
-  <property>
-    <name>log4j.category.DataNucleus</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.Datastore.Schema</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Datastore</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Plugin</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.MetaData</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Query</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.General</name>
-    <value>ERROR,DRFA</value>
-  </property>
-  <property>
-    <name>log4j.category.JPOX.Enhancer</name>
-    <value>ERROR,DRFA</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index bfdc8ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,285 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used by the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true causes the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. It is best effort: if the client sets it to true and the server sets it to false, the client setting is ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
-      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
-      process runs as.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable HDFS filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable local filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
-      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
-      this parameter to true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-    of buckets, a sort-merge join can be performed by setting this parameter to true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common
-      join into a map join based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Whether the join is automatically converted to a sort-merge join if the joined tables pass
-      the criteria for a sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Required to enable the conversion of an SMB (sort-merge-bucket) join to a map-join SMB join.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common join into a map join based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions in an n-way join is smaller than the
-      specified size, the join is directly converted to a map join (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter has no effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions in an n-way join is smaller than this size, the join is directly
-      converted to a map join (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs (reduce sink operators) by moving the key/parts/reducer-num of the child RS to the parent RS.
-      That means that if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single-MR job.
-      The optimization is disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter has no
-      effect. If it is on, and there are map-join jobs followed by a map-reduce
-      job (e.g., a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
-      will use 10 reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
-    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-    Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-</configuration>

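Component scripts read these values out of the command JSON that Ambari passes to every invocation; the javax.jdo.option.ConnectionURL stub above ("jdbc") is replaced with a real JDBC URL at deploy time. A self-contained sketch of that lookup pattern, using a stand-in config dict (the variable names are illustrative):

# Stand-in for the command JSON normally obtained via Script.get_config();
# only the keys used below are shown, with the values defined above.
config = {"configurations": {"hive-site": {
    "ambari.hive.db.schema.name": "hive",
    "javax.jdo.option.ConnectionUserName": "hive",
    "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
}}}

hive_db_name = config['configurations']['hive-site']['ambari.hive.db.schema.name']
hive_db_user = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
print("%s / %s" % (hive_db_name, hive_db_user))
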
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
deleted file mode 100644
index 3e89601..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,158 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HIVE</name>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.12.0.2.1.1</version>
-      <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MYSQL_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-client</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>This is a comment for the HCATALOG service</comment>
-      <version>0.12.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-    </service>
-
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100644
index 8d31b91..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-myhostname=$(hostname -f)
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-if ! mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -q "$mysqldbuser"; then
-  echo "Adding user $mysqldbuser@$myhostname";
-  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
-  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
-fi
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop

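The script takes four positional arguments: the mysqld service name, the Hive database user, its password, and the database host. An illustrative invocation from Python (all argument values are assumptions):

import subprocess

subprocess.check_call([
    "sh", "addMysqlUser.sh",
    "mysqld",                   # $1: mysqld service name
    "hive",                     # $2: database user
    "hivepassword",             # $3: database password
    "metastore.example.com"])   # $4: database host
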
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100644
index 9e7b33f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac

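The smoke script is driven by two positional arguments: a table name and an action (prepare or cleanup). A sketch of the round trip, with an illustrative table name:

import subprocess

table = "hcatsmoke_tbl"  # illustrative
subprocess.check_call(["sh", "hcatSmoke.sh", table, "prepare"])
# ...verify the table as needed...
subprocess.check_call(["sh", "hcatSmoke.sh", table, "cleanup"])
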
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100644
index 7e03524..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100644
index 051a21e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e '!run $2' 2>&1| awk '{print}'|grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi

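hiveserver2Smoke.sh expects the HiveServer2 JDBC URL as $1 and the path to the SQL file replayed through beeline's !run as $2, exiting 0 on success. An illustrative driver (the URL and file path are assumptions):

import subprocess

rc = subprocess.call([
    "sh", "hiveserver2Smoke.sh",
    "jdbc:hive2://localhost:10000",  # assumed HiveServer2 endpoint
    "/tmp/hiveserver2.sql"])         # assumed location of the SQL file
print("smoke test %s" % ("passed" if rc == 0 else "failed"))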

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
deleted file mode 100644
index 5145b9c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi

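Per the usage() text above, -t generates gmetad configuration, while -c plus the optional -m generates master or slave gmond configuration for one cluster. An illustrative pair of invocations (the cluster name, owner, and group are assumptions):

import subprocess

# gmetad configuration on the Ganglia server host.
subprocess.check_call(
    ["sh", "setupGanglia.sh", "-t", "-o", "root", "-g", "hadoop"])
# Master gmond configuration for one monitored cluster.
subprocess.check_call(
    ["sh", "setupGanglia.sh", "-c", "HDPSlaves", "-m",
     "-o", "root", "-g", "hadoop"])
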
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
deleted file mode 100644
index ab5102d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi

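The "for i in seq 0 5 ... sleep 1" block above is a retry poll: re-check for a live pid up to six times before declaring failure. The same pattern, generically re-expressed in Python (an illustration, not library code):

import time

def wait_for_pid(get_pid, attempts=6, delay=1):
    # Poll get_pid() up to `attempts` times, sleeping `delay` seconds
    # between checks; return the pid, or None on timeout.
    for _ in range(attempts):
        pid = get_pid()
        if pid:
            return pid
        time.sleep(delay)
    return None
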
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
deleted file mode 100644
index 239b62e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
deleted file mode 100644
index e79472b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    # changed because of a problem puppet had with the nobody user
-    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-    #         -b /var/lib/ganglia/rrds -B
-    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
-    # this, but it doesn't take sometimes due to a lack of permissions,
-    # so perform the operation explicitly to be super-sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
deleted file mode 100644
index 2764e0e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
deleted file mode 100644
index 1af3eb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
deleted file mode 100644
index 0a0d8d8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 

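The sleep 5 above is a fixed grace period for rrdcached to drain its buffers. An alternative sketch (not this script's behavior) that waits for the process to actually exit instead of sleeping blindly:

    import os
    import signal
    import time

    def term_and_wait(pid, timeout=10):
        # Send SIGTERM, then poll until the process exits or the timeout passes.
        os.kill(pid, signal.SIGTERM)
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                os.kill(pid, 0)      # still alive
            except OSError:
                return True          # process is gone
            time.sleep(0.5)
        return False                 # still running after the timeout
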
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
deleted file mode 100644
index b27f7a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
deleted file mode 100644
index 69fde27..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import os
-
-
-def groups_and_users():
-  import params
-
-def config():
-  import params
-
-  shell_cmds_dir = params.ganglia_shell_cmds_dir
-  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
-                 'gmondLib.sh', 'rrdcachedLib.sh',
-                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
-                 'startRrdcached.sh', 'stopGmetad.sh',
-                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
-  Directory(shell_cmds_dir,
-            owner="root",
-            group="root",
-            recursive=True
-  )
-  init_file("gmetad")
-  init_file("gmond")
-  for sh_file in shell_files:
-    shell_file(sh_file)
-  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
-    ganglia_TemplateConfig(conf_file)
-
-
-def init_file(name):
-  import params
-
-  File("/etc/init.d/hdp-" + name,
-       content=StaticFile(name + ".init"),
-       mode=0755
-  )
-
-
-def shell_file(name):
-  import params
-
-  File(params.ganglia_shell_cmds_dir + os.sep + name,
-       content=StaticFile(name),
-       mode=0755
-  )
-
-
-def ganglia_TemplateConfig(name, mode=0755, tag=None):
-  import params
-
-  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
-                 owner="root",
-                 group="root",
-                 template_tag=tag,
-                 mode=mode
-  )
-
-
-def generate_daemon(ganglia_service,
-                    name=None,
-                    role=None,
-                    owner=None,
-                    group=None):
-  import params
-
-  cmd = ""
-  if ganglia_service == "gmond":
-    if role == "server":
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
-    else:
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
-  elif ganglia_service == "gmetad":
-    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
-  else:
-    raise Fail("Unexpected ganglia service")
-  Execute(format(cmd),
-          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
-                "/sbin", "/usr/local/bin", "/bin", "/usr/bin"]
-  )

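generate_daemon above only assembles a setupGanglia.sh invocation; the -m flag is what marks a gmond as the cluster's master rather than a plain monitor. A stand-alone sketch of the same branching, without the resource_management Execute wrapper:

    def build_setup_cmd(shell_dir, service, name=None, role=None,
                        owner="root", group="hadoop"):
        # Mirrors generate_daemon's branching; returns the command string.
        base = "%s/setupGanglia.sh" % shell_dir
        if service == "gmond":
            master_flag = "-m " if role == "server" else ""
            return "%s -c %s %s-o %s -g %s" % (base, name, master_flag,
                                               owner, group)
        elif service == "gmetad":
            return "%s -t -o %s -g %s" % (base, owner, group)
        raise ValueError("Unexpected ganglia service: %s" % service)

    # build_setup_cmd("/usr/libexec/hdp/ganglia", "gmond",
    #                 name="HDPSlaves", role="monitor")
    # -> '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -o root -g hadoop'
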
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
deleted file mode 100644
index 4abdb9b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_monitor_service
-
-
-class GangliaMonitor(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-
-  def start(self, env):
-    ganglia_monitor_service.monitor("start")
-
-  def stop(self, env):
-    ganglia_monitor_service.monitor("stop")
-
-
-  def status(self, env):
-    import status_params
-    pid_file_name = 'gmond.pid'
-    pid_file_count = 0
-    pid_dir = status_params.pid_dir
-    # Recursively check all existing gmond pid files
-    for cur_dir, subdirs, files in os.walk(pid_dir):
-      for file_name in files:
-        if file_name == pid_file_name:
-          pid_file = os.path.join(cur_dir, file_name)
-          check_process_status(pid_file)
-          pid_file_count += 1
-    if pid_file_count == 0: # if no pid file is present
-      raise ComponentIsNotRunning()
-
-
-  def configure(self, env):
-    import params
-
-    ganglia.groups_and_users()
-
-    Directory(params.ganglia_conf_dir,
-              owner="root",
-              group=params.user_group,
-              recursive=True
-    )
-
-    ganglia.config()
-
-    if params.is_namenode_master:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_jtnode_master:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_rmnode_master:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hsnode_master:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hbase_master:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    # A pure slave hosts no master component at all.
-    pure_slave = not (params.is_namenode_master or
-                      params.is_jtnode_master or
-                      params.is_rmnode_master or
-                      params.is_hsnode_master or
-                      params.is_hbase_master) and params.is_slave
-    if pure_slave:
-      generate_daemon("gmond",
-                    name = "HDPSlaves",
-                    role = "monitor",
-                    owner = "root",
-                    group = params.user_group)
-
-    Directory(path.join(params.ganglia_dir, "conf.d"),
-              owner="root",
-              group=params.user_group
-    )
-
-    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "gmond.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-if __name__ == "__main__":
-  GangliaMonitor().execute()

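status() above walks the whole runtime directory because one gmond (and one gmond.pid) exists per configured cluster. The same check as plain Python, with check_process_status replaced by a simple liveness probe for illustration:

    import os

    def any_gmond_running(pid_dir, pid_file_name="gmond.pid"):
        # True if at least one gmond.pid under pid_dir names a live process.
        for cur_dir, _subdirs, files in os.walk(pid_dir):
            if pid_file_name not in files:
                continue
            try:
                with open(os.path.join(cur_dir, pid_file_name)) as f:
                    pid = int(f.read().strip())
                os.kill(pid, 0)
                return True
            except (IOError, ValueError, OSError):
                continue
        return False
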
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
deleted file mode 100644
index d86d894..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def monitor(action=None):  # 'start' or 'stop'
-  if action == "start":
-    Execute("chkconfig gmond off",
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    )
-  Execute(
-    format(
-      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
-    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
deleted file mode 100644
index 863f092..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_server_service
-
-
-class GangliaServer(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/gmetad.pid")
-    # Check the single gmetad pid file
-    check_process_status(pid_file)
-
-  def configure(self, env):
-    import params
-
-    ganglia.groups_and_users()
-    ganglia.config()
-
-    if params.has_namenodes:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_jobtracker:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_masters:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_resourcemanager:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_nodemanager:
-      generate_daemon("gmond",
-                      name = "HDPNodeManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_historyserver:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_slaves:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_journalnode:
-      generate_daemon("gmond",
-                      name = "HDPJournalNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    generate_daemon("gmetad",
-                    name = "gmetad",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-    change_permission()
-    server_files()
-    File(path.join(params.ganglia_dir, "gmetad.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-def change_permission():
-  import params
-
-  Directory('/var/lib/ganglia/dwoo',
-            mode=0777,
-            owner=params.gmetad_user,
-            recursive=True
-  )
-
-
-def server_files():
-  import params
-
-  rrd_py_path = params.rrd_py_path
-  Directory(rrd_py_path,
-            recursive=True
-  )
-  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
-  File(rrd_py_file_path,
-       content=StaticFile("rrd.py"),
-       mode=0755
-  )
-  rrd_file_owner = params.gmetad_user
-  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
-    Directory(params.rrdcached_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              mode=0755,
-              recursive=True
-    )
-    Directory(params.rrdcached_default_base_dir,
-              action = "delete"
-    )
-    Link(params.rrdcached_default_base_dir,
-         to=params.rrdcached_base_dir
-    )
-  elif rrd_file_owner != 'nobody':
-    Directory(params.rrdcached_default_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              recursive=True
-    )
-
-
-if __name__ == "__main__":
-  GangliaServer().execute()

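server_files() above relocates rrdcached's base directory and leaves a symlink at the default path, so anything still reading /var/lib/ganglia/rrds keeps working. A plain-Python sketch of that swap (note the recipe deletes the old directory rather than migrating its contents):

    import os
    import shutil

    def relocate_rrd_dir(default_dir, new_dir):
        # Move rrd storage to new_dir, symlinking the default path to it.
        if os.path.realpath(default_dir) == os.path.realpath(new_dir):
            return  # already in place
        if not os.path.isdir(new_dir):
            os.makedirs(new_dir)
        if os.path.isdir(default_dir) and not os.path.islink(default_dir):
            shutil.rmtree(default_dir)  # deleted, not migrated, as in the recipe
        if not os.path.lexists(default_dir):
            os.symlink(new_dir, default_dir)
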
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
deleted file mode 100644
index b93e3f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def server(action=None):  # 'start' or 'stop'
-  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  Execute(format(command),
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
deleted file mode 100644
index 601601e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-
-config = Script.get_config()
-
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = "/etc/ganglia/hdp"
-ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
-ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
-
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-webserver_group = "apache"
-rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-
-ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
-
-hostname = config["hostname"]
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-rm_host = default("/clusterHostInfo/rm_host", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-# datanodes are marked as slave_hosts
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
-flume_hosts = default("/clusterHostInfo/flume_hosts", [])
-jn_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-is_tasktracker = hostname in tt_hosts
-is_nodemanager = hostname in nm_hosts
-is_hbase_rs = hostname in hbase_rs_hosts
-is_flume = hostname in flume_hosts
-is_jn_host = hostname in jn_hosts
-
-has_namenodes = len(namenode_host) > 0
-has_jobtracker = len(jtnode_host) > 0
-has_resourcemanager = len(rm_host) > 0
-has_historyserver = len(hs_host) > 0
-has_hbase_masters = len(hbase_master_hosts) > 0
-has_slaves = len(slave_hosts) > 0
-has_tasktracker = len(tt_hosts) > 0
-has_nodemanager = len(nm_hosts) > 0
-has_hbase_rs = len(hbase_rs_hosts) > 0
-has_flume = len(flume_hosts) > 0
-has_journalnode = len(jn_hosts) > 0
-
-if System.get_instance().os_family == "suse":
-  rrd_py_path = '/srv/www/cgi-bin'
-else:
-  rrd_py_path = '/var/www/cgi-bin'

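Almost everything in params.py above is derived mechanically from the clusterHostInfo lists: membership of the current host gives an is_* flag, non-emptiness of the list gives a has_* flag. A condensed sketch of that derivation (role keys abbreviated for illustration):

    def derive_flags(hostname, cluster_host_info):
        # Map clusterHostInfo role lists to is_*/has_* booleans, as params.py does.
        roles = {
            "namenode_master": cluster_host_info.get("namenode_host", []),
            "hbase_master": cluster_host_info.get("hbase_master_hosts", []),
            "slave": cluster_host_info.get("slave_hosts", []),
        }
        flags = {}
        for role, hosts in roles.items():
            flags["is_" + role] = hostname in hosts
            flags["has_" + role] = len(hosts) > 0
        return flags
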
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
deleted file mode 100644
index 3ccad2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
deleted file mode 100644
index f3bb355..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-    HDPJournalNode          {{ganglia_server_host}}   8654
-    HDPFlumeServer          {{ganglia_server_host}}   8655
-    HDPHBaseRegionServer    {{ganglia_server_host}}   8656
-    HDPNodeManager          {{ganglia_server_host}}   8657
-    HDPTaskTracker          {{ganglia_server_host}}   8658
-    HDPDataNode             {{ganglia_server_host}}   8659
-    HDPSlaves               {{ganglia_server_host}}   8660
-    HDPNameNode             {{ganglia_server_host}}   8661
-    HDPJobTracker           {{ganglia_server_host}}   8662
-    HDPHBaseMaster          {{ganglia_server_host}}   8663
-    HDPResourceManager      {{ganglia_server_host}}   8664
-    HDPHistoryServer        {{ganglia_server_host}}   8666
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
deleted file mode 100644
index 1ead550..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER={{gmetad_user}};
-GMOND_USER={{gmond_user}};
-WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
deleted file mode 100644
index 4b5bdd1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR={{ganglia_conf_dir}};
-GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
-RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}

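getGangliaClusterInfo above is an awk filter: drop comment lines, and optionally match the first column against a cluster name. The equivalent lookup in Python, for comparison:

    def get_cluster_info(conf_path, cluster_name=None):
        # Return [name, host, port] rows from gangliaClusters.conf.
        rows = []
        with open(conf_path) as f:
            for line in f:
                fields = line.split()
                if not fields or fields[0].startswith("#"):
                    continue  # skip blank lines and comments, like the awk
                if cluster_name is None or fields[0] == cluster_name:
                    rows.append(fields)
        return rows

    # get_cluster_info("./gangliaClusters.conf", "HDPNameNode")
    # -> [['HDPNameNode', 'host.example.com', '8661']]
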
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
deleted file mode 100644
index b2c57bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>PID Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.40</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>10737418240</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-    <property>
-    <name>regionserver_handlers</name>
-    <value>60</value>
-    <description>HBase RegionServer Handler</description>
-  </property>
-    <property>
-    <name>hregion_majorcompaction</name>
-    <value>604800000</value>
-    <description>The time between major compactions of all HStoreFiles in a region. Set to 0 to disable automated major compactions.</description>
-  </property>
-    <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-    <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>Base Client Scanner Caching</description>
-  </property>
-    <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>30000</value>
-    <description>ZooKeeper Session Timeout</description>
-  </property>
-    <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-   <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read</description>
-  </property>
-   <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support</description>
-  </property>
-   <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>10</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Region Server memstore.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.38</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>

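All of these stack configuration files share the same name/value/description property layout. A minimal reader, should you need to inspect one outside Ambari:

    import xml.etree.ElementTree as ET

    def read_properties(xml_path):
        # Parse an Ambari configuration XML into a {name: value} dict.
        props = {}
        for prop in ET.parse(xml_path).getroot().findall("property"):
            props[prop.findtext("name")] = prop.findtext("value") or ""
        return props

    # read_properties("global.xml")["hbase_pid_dir"]  -> "/var/run/hbase"
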
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml
deleted file mode 100644
index 2258d73..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-log4j.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  <property>
-    <name>hbase.root.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hbase.security.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hbase.log.dir</name>
-    <value>.</value>
-  </property>
-  <property>
-    <name>hbase.log.file</name>
-    <value>hbase.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hbase.root.logger}</value>
-  </property>
-  <property>
-    <name>log4j.threshold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hbase.log.dir}/${hbase.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>hbase.log.maxfilesize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>hbase.log.maxbackupindex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.File</name>
-    <value>${hbase.log.dir}/${hbase.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxFileSize</name>
-    <value>${hbase.log.maxfilesize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxBackupIndex</name>
-    <value>${hbase.log.maxbackupindex}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>hbase.security.log.file</name>
-    <value>SecurityAuth.audit</value>
-  </property>
-  <property>
-    <name>hbase.security.log.maxfilesize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>hbase.security.log.maxbackupindex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.File</name>
-    <value>${hbase.log.dir}/${hbase.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxFileSize</name>
-    <value>${hbase.security.log.maxfilesize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxBackupIndex</name>
-    <value>${hbase.security.log.maxbackupindex}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.category.SecurityLogger</name>
-    <value>${hbase.security.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.SecurityLogger</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.NullAppender</name>
-    <value>org.apache.log4j.varia.NullAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.zookeeper</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hbase</name>
-    <value>DEBUG</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher</name>
-    <value>INFO</value>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie. 
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie. 
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

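The ACL grammar described above is a comma-separated user list, a blank, then a comma-separated group list, with "*" as a wildcard. A small parser for that format (a hypothetical helper, not part of the HBase API):

    def parse_acl(acl):
        # Split an HBase ACL string into (users, groups); '*' means everyone.
        if acl.strip() == "*":
            return None, None  # wildcard: all users allowed
        parts = acl.split(None, 1)  # user list, then optional group list
        if not parts:
            return [], []
        users = [u for u in parts[0].split(",") if u]
        groups = [g for g in parts[1].split(",") if g] if len(parts) > 1 else []
        return users, groups

    # parse_acl("alice,bob users,wheel") -> (['alice', 'bob'], ['users', 'wheel'])
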
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index d1e933d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,359 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstore during spikes in update traffic.  Without an
-    upper-bound, memstore fills such that when it flushes the
-    resultant flush files take a long time to compact or split, or
-    worse, we OOME
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-    Maximum HStoreFile size. If any one of a column family's HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 10G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values enable faster scanners but consume more memory, and
-    some calls of next may take longer when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout, i.e. hbase.regionserver.lease.period.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This sets an upper boundary for a single entry saved in a
-    storage file. Since a single entry cannot be split, it helps avoid a
-    region becoming unsplittable because its data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-    If more than this number of HStoreFiles exist in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles as one.  Larger numbers
-    put off compaction, but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flush.retries.number</name>
-    <value>120</value>
-    <description>
-    The number of times the region flush operation will be retried.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-    If more than this number of StoreFiles exist in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to the block
-        cache used by HFile/StoreFile. The HBase default of 0.25 means
-        allocate 25%. Set to 0 to disable, though this is not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-    <description>Controls whether secure authentication is enabled for HBase.
-      Possible values are 'simple' (no authentication) and 'kerberos'.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization; set this property to false to disable authorization checks.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any overridden coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file paths are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>Disables version verification.</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>Path to domain socket.</description>
-  </property>
-
-</configuration>
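
A quick orientation for the memstore settings in the file above: the
upperLimit/lowerLimit pair are fractions of the RegionServer heap, while
block.multiplier and flush.size bound a single region's memstore. A small
illustrative calculation of what these values imply, in Python (the 8 GB
heap size is a made-up assumption):

    # Illustrative only: how the hbase-site values above combine for a
    # hypothetical 8 GB RegionServer heap.
    heap_bytes = 8 * 1024 ** 3

    upper_limit = 0.4          # hbase.regionserver.global.memstore.upperLimit
    lower_limit = 0.38         # hbase.regionserver.global.memstore.lowerLimit
    flush_size = 134217728     # hbase.hregion.memstore.flush.size (128 MB)
    block_multiplier = 2       # hbase.hregion.memstore.block.multiplier

    # Global thresholds: writes block above the upper mark; forced flushing
    # continues until usage drops back to the lower mark.
    block_at = heap_bytes * upper_limit
    flush_until = heap_bytes * lower_limit

    # Per-region threshold: one region blocks updates at multiplier * flush size.
    region_block_at = block_multiplier * flush_size

    print("global block at %.2f GB" % (block_at / 1024.0 ** 3))
    print("flush until     %.2f GB" % (flush_until / 1024.0 ** 3))
    print("region block at %d MB" % (region_block_at / 1024 ** 2))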

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
deleted file mode 100644
index c16b160..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <comment>Non-relational distributed database</comment>
-      <version>0.96.0.2.1.1</version>
-      <components>
-        <component>
-          <name>HBASE_MASTER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HBASE_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hbase</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-        <config-type>hbase-log4j</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>
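
For readers tracing the refactor, the component layout of a metainfo.xml
like the one removed above is easy to inspect with the standard library; a
minimal standalone sketch (the local file path is an assumption):

    # List the components and command scripts declared in a metainfo.xml.
    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")  # hypothetical local copy of the file
    for component in tree.getroot().iter("component"):
        print("%-20s %-7s %s" % (component.findtext("name"),
                                 component.findtext("category"),
                                 component.findtext("commandScript/script")))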


[28/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..8b29cc3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
@@ -0,0 +1,212 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+import urlparse
+
+
+def namenode(action=None, format=True):
+  import params
+  # we need this directory to be present before any action (HA manual steps
+  # for the additional namenode)
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if format:
+      format_namenode()
+      pass
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+    service(
+      action="start", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      create_pid_dir=True,
+      create_log_dir=True,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+    # TODO: extract creating of dirs to different services
+    create_app_directories()
+    create_user_directories()
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+  if action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+
+def create_app_directories():
+  import params
+
+  hdfs_directory(name="/tmp",
+                 owner=params.hdfs_user,
+                 mode="777"
+  )
+  #mapred directories
+  if params.has_historyserver:
+    hdfs_directory(name="/mapred",
+                   owner=params.mapred_user
+    )
+    hdfs_directory(name="/mapred/system",
+                   owner=params.hdfs_user
+    )
+    #hbase directories
+  if len(params.hbase_master_hosts) != 0:
+    hdfs_directory(name=params.hbase_hdfs_root_dir,
+                   owner=params.hbase_user
+    )
+    hdfs_directory(name=params.hbase_staging_dir,
+                   owner=params.hbase_user,
+                   mode="711"
+    )
+    #hive directories
+  if len(params.hive_server_host) != 0:
+    hdfs_directory(name=params.hive_apps_whs_dir,
+                   owner=params.hive_user,
+                   mode="777"
+    )
+  if len(params.hcat_server_hosts) != 0:
+    hdfs_directory(name=params.webhcat_apps_dir,
+                   owner=params.webhcat_user,
+                   mode="755"
+    )
+  if len(params.hs_host) != 0:
+    if params.yarn_log_aggregation_enabled:
+      hdfs_directory(name=params.yarn_nm_app_log_dir,
+                     owner=params.yarn_user,
+                     group=params.user_group,
+                     mode="777",
+                     recursive_chmod=True
+      )
+    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="1777"
+    )
+
+  if params.has_falcon_host:
+    if params.falcon_store_uri[0:4] == "hdfs":
+      hdfs_directory(name=params.falcon_store_uri,
+                     owner=params.falcon_user,
+                     mode="755"
+      )
+
+def create_user_directories():
+  import params
+
+  hdfs_directory(name=params.smoke_hdfs_user_dir,
+                 owner=params.smoke_user,
+                 mode=params.smoke_hdfs_user_mode
+  )
+
+  if params.has_hive_server_host:
+    hdfs_directory(name=params.hive_hdfs_user_dir,
+                   owner=params.hive_user,
+                   mode=params.hive_hdfs_user_mode
+    )
+
+  if params.has_hcat_server_host:
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      hdfs_directory(name=params.hcat_hdfs_user_dir,
+                     owner=params.hcat_user,
+                     mode=params.hcat_hdfs_user_mode
+      )
+    hdfs_directory(name=params.webhcat_hdfs_user_dir,
+                   owner=params.webhcat_user,
+                   mode=params.webhcat_hdfs_user_mode
+    )
+
+  if params.has_oozie_server:
+    hdfs_directory(name=params.oozie_hdfs_user_dir,
+                   owner=params.oozie_user,
+                   mode=params.oozie_hdfs_user_mode
+    )
+
+
+def format_namenode(force=None):
+  import params
+
+  mark_dir = params.namenode_formatted_mark_dir
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if not params.dfs_ha_enabled:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    kinit_override=True)
+    else:
+      File('/tmp/checkForFormat.sh',
+           content=StaticFile("checkForFormat.sh"),
+           mode=0755)
+      Execute(format(
+        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
+        "{dfs_name_dir}"),
+              not_if=format("test -d {mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
+    Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+  user_group = params.user_group
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=user_group
+  )
+
+  ExecuteHadoop('dfsadmin -refreshNodes',
+                user=hdfs_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
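
The not_if=format("test -d {mark_dir}") guard in format_namenode() above is
what makes formatting idempotent: the command is skipped once the marker
directory exists. The same pattern in plain Python, as an illustration only
(the path and the direct format call are assumptions; the real script
delegates to checkForFormat.sh):

    # Marker-directory guard: format at most once, then record success.
    import os
    import subprocess

    mark_dir = "/var/run/hadoop/hdfs/namenode/formatted"  # illustrative path

    def format_namenode_once():
        if os.path.isdir(mark_dir):      # the "not_if" condition
            return                       # already formatted: do nothing
        subprocess.check_call(["hdfs", "namenode", "-format"])
        os.makedirs(mark_dir)            # marker makes the next run a no-op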

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..a943455
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    Directory(params.fs_checkpoint_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+  elif action == "start":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )
+  elif action == "stop":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
new file mode 100644
index 0000000..f2134d5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class JournalNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    service(
+      action="start", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def configure(self, env):
+    import params
+
+    Directory(params.jn_edits_dir,
+              recursive=True,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.journalnode_pid_file)
+
+
+if __name__ == "__main__":
+  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..deb01d5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_namenode import namenode
+
+
+class NameNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    #TODO remove when the config action is implemented
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    namenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="configure")
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="decommission")
+    pass
+
+if __name__ == "__main__":
+  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
new file mode 100644
index 0000000..99fdc70
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -0,0 +1,193 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+import os
+
+config = Script.get_config()
+
+if System.get_instance().os_type == "oraclelinux":
+  ulimit_cmd = ''
+else:
+  ulimit_cmd = "ulimit -c unlimited; "
+
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+falcon_user = config['configurations']['global']['falcon_user']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_historyserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(nagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+has_falcon_host = not len(falcon_host)  == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = status_params.hdfs_user
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group = "users"
+
+#hadoop params
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+hadoop_bin = "/usr/lib/hadoop/sbin"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+
+dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
+
+# if stack_version[0] == "2":
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+# else:
+#   dfs_name_dir = default("/configurations/hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
+hbase_staging_dir = "/apps/hbase/staging"
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
+webhcat_apps_dir = "/apps/webhcat"
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']#","true")
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']#","/app-logs")
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
+
+if has_oozie_server:
+  oozie_hdfs_user_dir = format("/user/{oozie_user}")
+  oozie_hdfs_user_mode = 775
+if has_hcat_server_host:
+  hcat_hdfs_user_dir = format("/user/{hcat_user}")
+  hcat_hdfs_user_mode = 755
+  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+  webhcat_hdfs_user_mode = 755
+if has_hive_server_host:
+  hive_hdfs_user_dir = format("/user/{hive_user}")
+  hive_hdfs_user_mode = 700
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 770
+
+namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
+
+# if stack_version[0] == "2":
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
+# else:
+#   fs_checkpoint_dir = default("/configurations/core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
+
+# if stack_version[0] == "2":
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
+# else:
+#   dfs_data_dir = default('/configurations/hdfs-site/dfs.data.dir',"/tmp/hadoop-hdfs/dfs/data")
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+namenode_id = None
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+  if len(dfs_ha_namenode_ids_list) > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    # remember this host's own NameNode id; stays None on hosts that
+    # are not NameNodes
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
+if journalnode_address:
+  journalnode_port = journalnode_address.split(":")[1]
+
+falcon_store_uri = config['configurations']['global']['falcon_store_uri']
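
Most of the lookups above go through the default(path, fallback) helper,
which walks a '/'-separated path into the command JSON and falls back when a
key is absent. A minimal reimplementation of that behaviour, for
illustration only (this is not the resource_management code itself):

    # Sketch of the default(path, fallback) lookup pattern used in params.py.
    def default(config, path, fallback):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cfg = {"clusterHostInfo": {"rm_host": ["rm1.example.com"]}}
    print(default(cfg, "/clusterHostInfo/rm_host", []))     # ['rm1.example.com']
    print(default(cfg, "/clusterHostInfo/zkfc_hosts", []))  # []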

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..d27b13a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,107 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = get_unique_id_and_date()
+    dir = '/tmp'
+    tmp_file = format("{dir}/{unique}")
+
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
+    test_dir_exists = format("hadoop fs -test -e {dir}")
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup put below to handle retries; if retrying there will be a stale
+    #file that needs cleanup; exit code is that of the second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
+    if params.security_enabled:
+      Execute(format(
+        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "{smoke_user}'"))
+    ExecuteHadoop(safemode_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      smoke_test_user = params.smoke_user
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format(
+        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName))
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = format(
+          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()
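
Each ExecuteHadoop above passes tries=5 and try_sleep=3, so a transient
failure is retried after a pause instead of failing the service check
outright. The retry contract, sketched standalone for illustration:

    # Retry semantics of tries/try_sleep, illustration only.
    import time

    def run_with_retries(fn, tries=5, try_sleep=3):
        for attempt in range(1, tries + 1):
            try:
                return fn()
            except Exception:
                if attempt == tries:
                    raise              # out of retries: surface the failure
                time.sleep(try_sleep)  # pause before the next attempt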

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..8f682ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_snamenode import snamenode
+
+
+class SNameNode(Script):
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.config(env)
+    snamenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
new file mode 100644
index 0000000..4097373
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['global']['hdfs_user']
+hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
new file mode 100644
index 0000000..017a47a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
@@ -0,0 +1,138 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def service(action=None, name=None, user=None, create_pid_dir=False,
+            create_log_dir=False, keytab=None, principal=None):
+  import params
+
+  kinit_cmd = "true"
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+  hadoop_daemon = format(
+    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  if create_pid_dir:
+    Directory(pid_dir,
+              owner=user,
+              recursive=True)
+  if create_log_dir:
+    Directory(log_dir,
+              owner=user,
+              recursive=True)
+
+  if params.security_enabled:
+    principal_replaced = principal.replace("_HOST", params.hostname)
+    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
+
+    if name == "datanode":
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+  daemon_cmd = format("{cmd} {action} {name}")
+
+  service_is_up = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
+
+  Execute(kinit_cmd)
+  Execute(daemon_cmd,
+          user = user,
+          not_if=service_is_up
+  )
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+         ignore_failures=True
+    )
+
+
+def hdfs_directory(name=None, owner=None, group=None,
+                   mode=None, recursive_chown=False, recursive_chmod=False):
+  import params
+
+  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
+  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
+
+  stub_dir = params.namenode_dirs_created_stub_dir
+  stub_filename = params.namenode_dirs_stub_filename
+  dir_absent_in_stub = format(
+    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
+  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
+  tries = 30
+  try_sleep = 10
+  dfs_check_nn_status_cmd = "true"
+
+  if params.dfs_ha_enabled:
+    namenode_id = params.namenode_id
+    dfs_check_nn_status_cmd = format(
+      "hdfs haadmin -getServiceState $namenode_id | grep active > /dev/null")
+
+  #if params.stack_version[0] == "2":
+  mkdir_cmd = format("fs -mkdir -p {name}")
+  #else:
+  #  mkdir_cmd = format("fs -mkdir {name}")
+
+  if params.security_enabled:
+    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
+            user = params.hdfs_user)
+  ExecuteHadoop(mkdir_cmd,
+                try_sleep=try_sleep,
+                tries=tries,
+                not_if=format(
+                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "{dir_exists} && ! {namenode_safe_mode_off}"),
+                only_if=format(
+                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "! {dir_exists}"),
+                conf_dir=params.hadoop_conf_dir,
+                user=params.hdfs_user
+  )
+  Execute(record_dir_in_stub,
+          user=params.hdfs_user,
+          only_if=format("{dir_absent_in_stub}")
+  )
+
+  recursive = "-R" if recursive_chown else ""
+  perm_cmds = []
+
+  if owner:
+    chown = owner
+    if group:
+      chown = format("{owner}:{group}")
+    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
+  if mode:
+    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
+  for cmd in perm_cmds:
+    ExecuteHadoop(cmd,
+                  user=params.hdfs_user,
+                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
+                  try_sleep=try_sleep,
+                  tries=tries,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+
+
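
The service_is_up guard built in service() above shells out to ls and ps;
expressed in plain Python the check amounts to the following (illustration
only):

    # Pid-file liveness check equivalent to
    # "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1".
    import os

    def service_is_up(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False               # no pid file, or unreadable contents
        try:
            os.kill(pid, 0)            # signal 0 = existence check only
        except OSError:
            return False               # no such process
        return True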

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..1f9ba65
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,62 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class ZkfcSlave(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    service(
+      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def config(self, env):
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.zkfc_pid_file)
+
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
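
The template above is rendered with the hdfs_exclude_file host list from
params.py; with the real jinja2 library the rendering step looks like this
(the host names are made-up examples):

    # Render exclude_hosts_list.j2 with a sample host list.
    from jinja2 import Template

    src = "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}"
    print(Template(src).render(hdfs_exclude_file=["dn1.example.com",
                                                  "dn2.example.com"]))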

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
new file mode 100644
index 0000000..b0f5268
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>INFO,FA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>${hive.query.id}.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA</name>
+    <value>org.apache.log4j.FileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,FA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
new file mode 100644
index 0000000..e92c3e8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>WARN,DRFA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>hive.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.encoding</name>
+    <value>UTF-8</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,DRFA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
index 899cfe9..bfdc8ac 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
@@ -183,7 +183,7 @@ limitations under the License.
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
     <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-      of buckets, a sort-merge join can be performed by setting this parameter as true.
+    of buckets, a sort-merge join can be performed by setting this parameter as true.
     </description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
index f13193f..3e89601 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
@@ -16,30 +16,143 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.12.0.2.0.6.1</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.12.0.2.1.1</version>
+      <components>
 
-    <components>        
         <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
+          <name>HIVE_METASTORE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
+
         <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
+          <name>HIVE_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+
+        <component>
+          <name>MYSQL_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
         <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
+          <name>HIVE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>HCATALOG</name>
+      <comment>HCatalog is a table and storage management layer for Hadoop.</comment>
+      <version>0.12.0.2.0.6.0</version>
+      <components>
         <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HCAT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hive-site</config-type>
-    </configuration-dependencies>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+    </service>
+
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
new file mode 100644
index 0000000..8d31b91
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
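+# Usage: addMysqlUser.sh <mysqld service name> <db user> <db password> <db host>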
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+mysqldbhost=$4
+myhostname=$(hostname -f)
+
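+# Start MySQL so the grants below can be applied; it is stopped again at the end.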
+service $mysqldservice start
+echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
+if ! mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -q "$mysqldbuser"; then
+  echo "Adding user $mysqldbuser@$myhostname";
+  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
+  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
+fi
+mysql -u root -e "flush privileges;"
+service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
new file mode 100644
index 0000000..9e7b33f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
new file mode 100644
index 0000000..7e03524
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..051a21e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
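+# Args: $1 = HiveServer2 JDBC URL, $2 = SQL script executed through beeline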
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh
new file mode 100644
index 0000000..fa90c2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
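+# Args: $1 = stdout log, $2 = stderr log, $3 = pid file, $4 = Hive conf dir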
+HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
+echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
new file mode 100644
index 0000000..9350776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
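+# Args: $1 = stdout log, $2 = stderr log, $3 = pid file, $4 = Hive conf dir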
+HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
+echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
new file mode 100644
index 0000000..2993d3a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hcat():
+  import params
+
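+  # The conf dir is owned by the hcat user; the pid dir is created for the WebHCat server user.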
+  Directory(params.hcat_conf_dir,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            recursive=True
+  )
+
+  hcat_TemplateConfig('hcat-env.sh')
+
+
+def hcat_TemplateConfig(name):
+  import params
+
+  TemplateConfig(format("{hcat_conf_dir}/{name}"),
+                 owner=params.hcat_user,
+                 group=params.user_group
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
new file mode 100644
index 0000000..8b5921a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+
+class HCatClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+
+    hcat()
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..bc75e44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+
+def hcat_service_check():
+    import params
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
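+    # Flow: create a table through hcat, verify its warehouse directory landed in HDFS, then drop it.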
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File('/tmp/hcatSmoke.sh',
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir)
+
+    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000..44e6a77
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+def hive(name=None):
+  import params
+
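+  # Server components (metastore, hiveserver2) get 0600 configs plus the JDBC driver;
+  # the client path writes 0644 configs only.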
+  if name == 'metastore' or name == 'hiveserver2':
+    hive_config_dir = params.hive_server_conf_dir
+    config_file_mode = 0600
+    jdbc_connector()
+  else:
+    hive_config_dir = params.hive_conf_dir
+    config_file_mode = 0644
+
+  Directory(hive_config_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=config_file_mode
+  )
+
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar_name}]"))
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=StaticFile('startHiveserver2.sh')
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+  )
+
+  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
+
+  log4j_exec_filename = 'hive-exec-log4j.properties'
+  if params.log4j_exec_props is not None:
+    PropertiesFile(log4j_exec_filename,
+                   dir=params.hive_conf_dir,
+                   properties=params.log4j_exec_props,
+                   mode=0664,
+                   owner=params.hive_user,
+                   group=params.user_group
+    )
+  elif (os.path.exists("{params.hive_conf_dir}/{log4j_exec_filename}.template")):
+    File(format("{params.hive_conf_dir}/{log4j_exec_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{params.hive_conf_dir}/{log4j_exec_filename}.template"))
+    )
+
+  log4j_filename = 'hive-log4j.properties'
+  if params.log4j_props is not None:
+    PropertiesFile(log4j_filename,
+                   dir=params.hive_conf_dir,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hive_user,
+                   group=params.user_group
+    )
+  elif (os.path.exists("{params.hive_conf_dir}/{log4j_filename}.template")):
+    File(format("{params.hive_conf_dir}/{log4j_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{params.hive_conf_dir}/{log4j_filename}.template"))
+    )
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
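+  # Stage the metastore JDBC driver: MySQL from the OS package, Oracle via download.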
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    cmd = format(
+      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..0a5fb2b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..c741174
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Verify the metastore pid file points at a live process
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()


[50/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..211c2bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
@@ -0,0 +1,197 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
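+# All values below are read from the command JSON that ambari-server sends with each task.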
+#java params
+artifact_dir = "/tmp/HDP-artifacts/"
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#users and groups
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+yarn_user = config['configurations']['global']['yarn_user']
+
+user_group = config['configurations']['global']['user_group']
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#snmp
+snmp_conf_dir = "/etc/snmp/"
+snmp_source = "0.0.0.0/0"
+snmp_community = "hadoop"
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(nagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+#hadoop params
+hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+hadoop_lib_home = "/usr/lib/hadoop/lib"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_home = "/usr"
+hadoop_bin = "/usr/lib/hadoop/bin"
+
+task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+limits_conf_dir = "/etc/security/limits.d"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url']
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
+
+rca_enabled = config['configurations']['global']['rca_enabled']
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+if System.get_instance().os_family == "suse":
+  jsvc_path = "/usr/lib/bigtop-utils"
+else:
+  jsvc_path = "/usr/libexec/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['global']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("jtnode_heapsize","1024m")
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+#hdfs ha properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
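+# Determine this host's NameNode id by matching it against the configured RPC addresses.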
+namenode_id = None
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids.split(","):
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+rca_property_map = {
+  'ambari.jobhistory.database': ambari_db_rca_url,
+  'ambari.jobhistory.driver': ambari_db_rca_driver,
+  'ambari.jobhistory.user': ambari_db_rca_username,
+  'ambari.jobhistory.password': ambari_db_rca_password,
+  'ambari.jobhistory.logger': 'DEBUG,JHA',
+
+  'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
+  'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
+  'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
+  'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
+  'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
+
+  'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
+  'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
+}
+
+if ('hdfs-log4j' in config['configurations']):
+  log4j_props = config['configurations']['hdfs-log4j']
+  if 'mapreduce-log4j' in config['configurations']:
+    log4j_props.update(config['configurations']['mapreduce-log4j'])
+else:
+  log4j_props = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..d6521ee
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,317 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_java():
+  """
+  Installs jdk using specific params, that comes from ambari-server
+  """
+  import params
+
+  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
+  java_dir = os.path.dirname(params.java_home)
+  java_exec = format("{java_home}/bin/java")
+  
+  if not params.jdk_name:
+    return
+  
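+  # Download the JDK archive only when no java executable is already present.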
+  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}"))
+
+  if params.jdk_name.endswith(".bin"):
+    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
+  elif params.jdk_name.endswith(".gz"):
+    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+  
+  Execute(install_cmd,
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}")
+  )
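+
+  # Fetch the JCE unlimited-strength policy archive; the download may fail
+  # harmlessly, since the policy files are only installed on secured clusters.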
+  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
+  Execute( download_jce,
+        path = ["/bin","/usr/bin/"],
+        not_if =format("test -e {jce_curl_target}"),
+        ignore_failures = True
+  )
+  
+  if params.security_enabled:
+    security_dir = format("{java_home}/jre/lib/security")
+    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
+    Execute(extract_cmd,
+          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+          cwd  = security_dir,
+          path = ['/bin/','/usr/bin']
+    )
+
+def setup_hadoop():
+  """
+  Sets up Hadoop files and directories.
+  """
+  import params
+
+  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
+       content=Template("snmpd.conf.j2"))
+  Service("snmpd",
+          action = "restart")
+
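+  # Put SELinux into permissive mode on hosts where the enforce switch exists.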
+  Execute("/bin/echo 0 > /selinux/enforce",
+          only_if="test -f /selinux/enforce"
+  )
+
+  install_snappy()
+
+  #directories
+  Directory(params.hadoop_conf_dir,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hdfs_log_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hadoop_pid_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+
+  #files
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
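+  # On secured clusters taskcontroller.cfg is root-owned with restrictive
+  # permissions; otherwise it is simply owned by the HDFS user.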
+  if tc_mode:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  for file_name in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
+    File(os.path.join(params.hadoop_conf_dir, file_name),
+         owner=tc_owner,
+         content=Template(file_name + ".j2")
+    )
+
+  health_check_template = "health_check" # stack 1 uses 'health_check'; 'health_check-v2' is the stack 2 variant
+  File(os.path.join(params.hadoop_conf_dir, "health_check"),
+       owner=tc_owner,
+       content=Template(health_check_template + ".j2")
+  )
+
+  log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+  if params.log4j_props is not None:
+    PropertiesFile(log4j_filename,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hdfs_user,
+                   group=params.user_group,
+    )
+  elif os.path.exists(log4j_filename):
+    File(log4j_filename,
+         mode=0644,
+         group=params.user_group,
+         owner=params.hdfs_user,
+    )
+
+  update_log4j_props(log4j_filename)
+
+  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+       owner=params.hdfs_user,
+       content=Template("hadoop-metrics2.properties.j2")
+  )
+
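+  # Download the JDBC driver for the Ambari server database (Oracle or MySQL)
+  # when a driver URL has been configured.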
+  db_driver_dload_cmd = ""
+  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+
+  if db_driver_dload_cmd:
+    Execute(db_driver_dload_cmd,
+            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
+    )
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services.
+  """
+  import params
+
+  if "mapred-queue-acls" in params.config['configurations']:
+    XmlConfig("mapred-queue-acls.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'mapred-queue-acls'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+  elif os.path.exists(
+      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
+    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  File(params.task_log4j_properties_location,
+       content=StaticFile("task-log4j.properties"),
+       mode=0755
+  )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  # if params.stack_version[0] == "1":
+  Link('/usr/lib/hadoop/lib/hadoop-tools.jar',
+       to = '/usr/lib/hadoop/hadoop-tools.jar'
+  )
+
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
+    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+    File(os.path.join(params.hadoop_conf_dir, 'masters'),
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  # generate_include_file()
+
+def update_log4j_props(file_path):
+  import params
+
+  # Rewrite each Ambari RCA property in log4j.properties in place via sed,
+  # honoring the enabled/disabled prefixes defined in params.
+  for key, value in params.rca_property_map.items():
+    Execute(format(
+      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file_path}")
+    )
+
+
+def generate_include_file():
+  import params
+
+  # The include_hosts_list.j2 template renders params.slave_hosts directly.
+  if params.dfs_hosts and params.has_slaves:
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+
+def install_snappy():
+  import params
+
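+  # Symlink the bundled 32- and 64-bit libsnappy builds into the native-library
+  # directory layout that Hadoop expects.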
+  snappy_so = "libsnappy.so"
+  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
+  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
+  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
+  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
+  so_src_dir_x86 = format("{hadoop_home}/lib")
+  so_src_dir_x64 = format("{hadoop_home}/lib64")
+  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
+  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
+  Execute(
+    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
+  Execute(
+    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..77e458f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..bb5795b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2
new file mode 100644
index 0000000..51e2bac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-env.sh.j2
@@ -0,0 +1,121 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: a pre-set HADOOP_CONF_DIR environment variable will override this value
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+# Path to jsvc, required only by secure HDP 2.0 datanodes; it is not needed on
+# this HDP 1 stack, so it stays commented out.
+# export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..a6a66ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2
new file mode 100644
index 0000000..ca7baa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/hdfs.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile 32768
+{{hdfs_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2
new file mode 100644
index 0000000..cb7b12b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check-v2.j2
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
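+  # Flag ext3 mounts from /etc/fstab that are unmounted (u) or read-only (ro)
+  # according to /proc/mounts.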
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..b84b336
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
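+  # Probe the TaskTracker's JMX servlet; too many shuffle exceptions mark the node unhealthy.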
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:{{tasktracker_port}}/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/slaves.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2
new file mode 100644
index 0000000..3530444
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/snmpd.conf.j2
@@ -0,0 +1,48 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
+group   notConfigGroup v1           notConfigUser
+group   notConfigGroup v2c           notConfigUser
+view    systemview    included   .1
+access  notConfigGroup ""      any       noauth    exact  systemview none none
+
+syslocation Hadoop 
+syscontact HadoopMaster 
+dontLogTCPWrappersConnects yes
+
+###############################################################################
+# disk checks
+
+disk / 10000
+
+
+###############################################################################
+# load average checks
+#
+
+# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
+#
+# 1MAX:   If the 1 minute load average is above this limit at query
+#         time, the errorFlag will be set.
+# 5MAX:   Similar, but for 5 min average.
+# 15MAX:  Similar, but for 15 min average.
+
+# Check for loads:
+#load 12 14 14
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2
new file mode 100644
index 0000000..d01d37e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/templates/taskcontroller.cfg.j2
@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
index 185f685..bebb54e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/FLUME/metainfo.xml
@@ -18,12 +18,13 @@
 <metainfo>
     <user>root</user>
     <comment>Flume is a distributed, reliable, and available system for efficiently collecting, aggregating and moving large amounts of log data from many different sources to a centralized data store.</comment>
-    <version>1.3.1.1.3.2.0</version>
+    <version>1.3.1.1.3.3.0</version>
 
     <components>
         <component>
             <name>FLUME_SERVER</name>
             <category>MASTER</category>
+            <cardinality>1</cardinality>
         </component>
     </components>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
index 1a895b8..36381ce 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
@@ -16,29 +16,98 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GANGLIA</name>
+      <comment>Ganglia Metrics Collection system</comment>
+      <version>3.5.0</version>
+      <components>
         <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
+          <name>GANGLIA_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/ganglia_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
+          <name>GANGLIA_MONITOR</name>
+          <category>SLAVE</category>
+          <cardinality>ALL</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/ganglia_monitor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>libganglia-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmetad-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-web-3.5.7-99.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>python-rrdtool.x86_64</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-modules-python-3.5.0-99</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>apache2</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>apache2-mod_php5</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>httpd</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>httpd</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh
new file mode 100644
index 0000000..e60eb31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmetad.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh
new file mode 100644
index 0000000..0cec8dc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkGmond.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh
new file mode 100644
index 0000000..d94db5d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/checkRrdcached.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init
new file mode 100644
index 0000000..20b388e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetad.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmetad startup script
+# processname: hdp-gmetad
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANGLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANGLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANGLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANGLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANGLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANGLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANGLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh
new file mode 100644
index 0000000..e28610e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmetadLib.sh
@@ -0,0 +1,204 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
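+    # Only report the logged PID when a process with that PID is still alive.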
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port addreses2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed.
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value is less
+# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+# New Default RRA
+# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
+# Two weeks of data points at 1 minute resolution (average)
+#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
+# Retaining existing resolution
+RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+     "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a case-sensitive
+# manner.
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}

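The RRA retention kept above is easy to sanity-check: each "RRA:AVERAGE:0.5:steps:rows" archive stores 'rows' consolidated points, each covering 'steps' base steps of 15sec. A minimal Python sketch of that arithmetic (the 15sec base step is the gmetad default assumed throughout these comments):

    BASE_STEP = 15  # seconds: gmetad's default polling/step

    def rra_retention_seconds(steps, rows, base_step=BASE_STEP):
        # Each archived row consolidates 'steps' base steps.
        return steps * rows * base_step

    for steps, rows in [(1, 244), (24, 244), (168, 244), (672, 244), (5760, 374)]:
        hours = rra_retention_seconds(steps, rows) / 3600.0
        print("RRA:AVERAGE:0.5:%d:%d keeps ~%.1f hours" % (steps, rows, hours))

The first archive works out to roughly an hour of full-resolution data and the last to about a year of 24-hour averages, matching the "retaining existing resolution" comment above.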
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init
new file mode 100644
index 0000000..afb7026
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmond.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmond startup script
+# processname: hdp-gmond
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANGLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANGLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANGLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANGLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANGLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANGLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANGLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
new file mode 100644
index 0000000..87da4dd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/files/gmondLib.sh
@@ -0,0 +1,545 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host's total memory
+   every 180 secs.
+   This information doesn't change between reboots and is only collected
+   once. It is needed for the heatmap display. */
+ collection_group {
+   collect_once = yes
+   time_threshold = 180
+   metric {
+    name = "mem_total"
+    title = "Memory Total"
+   }
+ }
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+udp_recv_channel {
+    port = 0
+}
+
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}

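Taken together, the three generators above define the gmond topology: every node loads gmond.core.conf, the cluster master additionally binds udp_recv_channel and tcp_accept_channel on its own IP, and slaves get the matching udp_send_channel. A rough Python sketch of how the master/slave channel pair lines up, with hypothetical cluster values:

    # Hypothetical values, for illustration only.
    cluster = {"master_ip": "10.0.0.1", "port": "8660"}

    master_conf = ("udp_recv_channel {\n  bind = %(master_ip)s\n  port = %(port)s\n}\n"
                   "tcp_accept_channel {\n  bind = %(master_ip)s\n  port = %(port)s\n}\n") % cluster

    slave_conf = ("udp_send_channel {\n  host = %(master_ip)s\n"
                  "  port = %(port)s\n  ttl = 1\n}\n") % cluster

    # Slaves send to exactly the address/port pair the master listens on.
    assert "10.0.0.1" in master_conf and "10.0.0.1" in slave_conf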

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php
new file mode 100644
index 0000000..487eb43
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/files/hdp_nagios_init.php
@@ -0,0 +1,81 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Common functions called from other alerts
+ *
+ */
+ 
+ /*
+ * Function for kinit. If security is enabled and klist shows no cached
+ * ticket for this principal, makes a kinit call.
+ */
+  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
+    if($security_enabled === 'true') {
+    
+      $is_logined = is_logined($principal_name);
+      
+      if (!$is_logined)
+        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
+      else
+        $status = array(0, '');
+    } else {
+      $status = array(0, '');
+    }
+  
+    return $status;
+  }
+  
+  
+  /*
+  * Checks whether the user is logged in to Kerberos (klist lists the principal).
+  */
+  function is_logined($principal_name) {
+    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
+    $check_output =  shell_exec($check_cmd);
+    
+    if ($check_output)
+      return false;
+    else
+      return true;
+  }
+
+  /*
+  * Runs kinit command.
+  */
+  function kinit($kinit_path_local, $keytab_path, $principal_name) {
+    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
+    $kinit_output = shell_exec($init_cmd);
+    if ($kinit_output) 
+      $status = array(1, $kinit_output);
+    else
+      $status = array(0, '');
+      
+    return $status;
+  }
+
+  function logout() {
+    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
+      $status = true;
+    else
+      $status = false;
+      
+    return $status;
+  }
+ 
+ ?>
\ No newline at end of file

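The kinit_if_needed/is_logined/kinit trio above is a plain check-then-authenticate flow. The same logic sketched in Python for readability (path and principal handling are placeholders, not part of the PHP API):

    import subprocess

    def kinit_if_needed(security_enabled, kinit_path, keytab_path, principal):
        if not security_enabled:
            return (0, '')
        # klist | grep finds the principal only when a ticket is cached.
        have_ticket = subprocess.call(
            "klist | grep %s >/dev/null 2>&1" % principal, shell=True) == 0
        if have_ticket:
            return (0, '')
        # kinit prints nothing on success; any output is an error message.
        proc = subprocess.Popen([kinit_path, "-kt", keytab_path, principal],
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = proc.communicate()
        return (1, out) if out else (0, '')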
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py
new file mode 100644
index 0000000..964225e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/functions.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.libraries.script.config_dictionary import UnknownConfiguration
+
+def get_port_from_url(address):
+  # Everything after the last ':' is the port for the host:port
+  # addresses used by these Nagios params.
+  if not is_empty(address):
+    return address.split(':')[-1]
+  else:
+    return address
+  
+def is_empty(var):
+  # True when the server could not resolve the value.
+  return isinstance(var, UnknownConfiguration)
\ No newline at end of file

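A quick illustration of get_port_from_url: it returns everything after the last ':', which is the port for the host:port addresses these params use, while UnknownConfiguration values pass through untouched (hostnames below are hypothetical):

    >>> get_port_from_url('namenode.example.com:50070')
    '50070'
    >>> get_port_from_url('c6401.ambari.apache.org:8088')
    '8088'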
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py
new file mode 100644
index 0000000..9150995
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from nagios_server_config import nagios_server_config
+
+def nagios():
+  import params
+
+  File( params.nagios_httpd_config_file,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    content = Template("nagios.conf.j2"),
+    mode   = 0644
+  )
+
+  # enable snmpd
+  Execute( "service snmpd start; chkconfig snmpd on",
+    path = "/usr/local/bin/:/bin/:/sbin/"
+  )
+  
+  Directory( params.conf_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+
+  Directory( [params.plugins_dir, params.nagios_obj_dir])
+
+  Directory( params.nagios_pid_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755,
+    recursive = True
+  )
+
+  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    recursive = True
+  )
+  
+  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755
+  )
+
+  nagios_server_config()
+
+  set_web_permissions()
+
+  File( format("{conf_dir}/command.cfg"),
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+  
+  
+def set_web_permissions():
+  import params
+
+  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
+  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
+  Execute( cmd,
+    not_if = test
+  )
+
+  File( "/etc/nagios/htpasswd.users",
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode  = 0640
+  )
+
+  if System.get_instance().os_family == "suse":
+    command = format("usermod -G {nagios_group} wwwrun")
+  else:
+    command = format("usermod -a -G {nagios_group} apache")
+  
+  Execute( command)

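set_web_permissions relies on the standard resource_management idempotency idiom: Execute runs its command only while the not_if guard command fails. A minimal sketch of the pattern, assuming the agent's Environment is active as for the resources above (user "admin" and password "secret" are placeholders):

    # The command runs only when the guard exits non-zero, so a
    # repeated configure() is a no-op.
    Execute('htpasswd -c -b /etc/nagios/htpasswd.users admin secret',
            not_if='grep admin /etc/nagios/htpasswd.users')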
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py
new file mode 100644
index 0000000..02685c7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from nagios import nagios
+from nagios_service import nagios_service
+
+         
+class NagiosServer(Script):
+  def install(self, env):
+    remove_conflicting_packages()
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    nagios()
+
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    self.configure(env) # rerun so that configs are updated after security is enabled
+    nagios_service(action='start')
+
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    
+    nagios_service(action='stop')
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nagios_pid_file)
+    
+def remove_conflicting_packages():  
+  Package( 'hdp_mon_nagios_addons',
+    action = "remove"
+  )
+
+  Package( 'nagios-plugins',
+    action = "remove"
+  )
+
+  Execute( "rpm -e --allmatches --nopostun nagios",
+    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+    ignore_failures = True 
+  )
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
+  stroutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
+  
+  NagiosServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  NagiosServer().execute()

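As the commented-out main() above hints, Script.execute() reads its inputs from sys.argv: the command name, the command JSON written by the agent, the package base directory, and a structured-output file. Roughly (paths here are illustrative, not the agent's real layout):

    import sys
    sys.argv = ["nagios_server.py", "start",
                "/var/lib/ambari-agent/data/command-3.json",
                "/var/lib/ambari-agent/cache/stacks/HDP/1.3.2/services/NAGIOS/package",
                "/tmp/structured-out.json"]
    NagiosServer().execute()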
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py
new file mode 100644
index 0000000..9f6c884
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_server_config.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def nagios_server_config():
+  import params
+  
+  nagios_server_configfile( 'nagios.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'resource.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'hadoop-hosts.cfg')
+  nagios_server_configfile( 'hadoop-hostgroups.cfg')
+  nagios_server_configfile( 'hadoop-servicegroups.cfg')
+  nagios_server_configfile( 'hadoop-services.cfg')
+  nagios_server_configfile( 'hadoop-commands.cfg')
+  nagios_server_configfile( 'contacts.cfg')
+  
+  if System.get_instance().os_family != "suse":
+    nagios_server_configfile( 'nagios',
+                              config_dir = '/etc/init.d/', 
+                              mode = 0755, 
+                              owner = 'root', 
+                              group = 'root'
+    )
+
+  nagios_server_check( 'check_cpu.pl')
+  nagios_server_check( 'check_datanode_storage.php')
+  nagios_server_check( 'check_aggregate.php')
+  nagios_server_check( 'check_hdfs_blocks.php')
+  nagios_server_check( 'check_hdfs_capacity.php')
+  nagios_server_check( 'check_rpcq_latency.php')
+  nagios_server_check( 'check_webui.sh')
+  nagios_server_check( 'check_name_dir_status.php')
+  nagios_server_check( 'check_oozie_status.sh')
+  nagios_server_check( 'check_templeton_status.sh')
+  nagios_server_check( 'check_hive_metastore_status.sh')
+  nagios_server_check( 'check_hue_status.sh')
+  nagios_server_check( 'check_mapred_local_dir_used.sh')
+  nagios_server_check( 'check_nodemanager_health.sh')
+  nagios_server_check( 'check_namenodes_ha.sh')
+  nagios_server_check( 'hdp_nagios_init.php')
+
+
+def nagios_server_configfile(
+  name,
+  owner = None,
+  group = None,
+  config_dir = None,
+  mode = None
+):
+  import params
+  owner = params.nagios_user if not owner else owner
+  group = params.user_group if not group else group
+  config_dir = params.nagios_obj_dir if not config_dir else config_dir
+  
+  TemplateConfig( format("{config_dir}/{name}"),
+    owner          = owner,
+    group          = group,
+    mode           = mode
+  )
+
+def nagios_server_check(name):
+  File( format("{plugins_dir}/{name}"),
+    content = StaticFile(name), 
+    mode = 0755
+  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py
new file mode 100644
index 0000000..cc411b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/nagios_service.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def nagios_service(action='start'): # start or stop
+  import params
+
+  if action == 'start':
+    command = "service nagios start"
+  elif action == 'stop':
+    command = format("service nagios stop && rm -f {nagios_pid_file}")
+
+  Execute( command,
+     path    = "/usr/local/bin/:/bin/:/sbin/"      
+  )
+  MonitorWebserver("restart")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
new file mode 100644
index 0000000..870a0db
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from functions import get_port_from_url
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/nagios"
+nagios_var_dir = "/var/nagios"
+nagios_rw_dir = "/var/nagios/rw"
+plugins_dir = "/usr/lib64/nagios/plugins"
+nagios_obj_dir = "/etc/nagios/objects"
+check_result_path = "/var/nagios/spool/checkresults"
+nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
+nagios_log_dir = "/var/log/nagios"
+nagios_log_archives_dir = format("{nagios_log_dir}/archives")
+nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
+nagios_lookup_daemon_str = "/usr/sbin/nagios"
+nagios_pid_dir = status_params.nagios_pid_dir
+nagios_pid_file = status_params.nagios_pid_file
+nagios_resource_cfg = format("{conf_dir}/resource.cfg")
+nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
+nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
+nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
+nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
+eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
+nagios_principal_name = default("nagios_principal_name", "nagios")
+hadoop_ssl_enabled = False
+
+namenode_metadata_port = "8020"
+oozie_server_port = "11000"
+# different to HDP2    
+namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.http.address'])
+# different to HDP2  
+snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.secondary.http.address"])
+
+hbase_master_rpc_port = "60000"
+rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
+nm_port = "8042"
+hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
+journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
+datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
+flume_port = "4159"
+hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
+templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
+hbase_rs_port = "60030"
+
+# these 4 are different for HDP2
+jtnode_port = get_port_from_url(config['configurations']['mapred-site']['mapred.job.tracker.http.address'])
+jobhistory_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
+tasktracker_port = "50060"
+mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
+
+# this is different for HDP2
+nn_metrics_property = "FSNamesystemMetrics"
+clientPort = config['configurations']['global']['clientPort'] #ZK 
+
+
+java64_home = config['hostLevelParams']['java_home']
+security_enabled = config['configurations']['global']['security_enabled']
+
+nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+ganglia_port = "8651"
+ganglia_collector_slaves_port = "8660"
+ganglia_collector_namenode_port = "8661"
+ganglia_collector_jobtracker_port = "8662"
+ganglia_collector_hbase_port = "8663"
+ganglia_collector_rm_port = "8664"
+ganglia_collector_nm_port = "8660"
+ganglia_collector_hs_port = "8666"
+  
+all_ping_ports = config['clusterHostInfo']['all_ping_ports']
+
+if System.get_instance().os_family == "suse":
+  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
+  htpasswd_cmd = "htpasswd2"
+else:
+  nagios_p1_pl = "/usr/bin/p1.pl"
+  htpasswd_cmd = "htpasswd"
+  
+nagios_user = config['configurations']['global']['nagios_user']
+nagios_group = config['configurations']['global']['nagios_group']
+nagios_web_login = config['configurations']['global']['nagios_web_login']
+nagios_web_password = config['configurations']['global']['nagios_web_password']
+user_group = config['configurations']['global']['user_group']
+nagios_contact = config['configurations']['global']['nagios_contact']
+
+namenode_host = default("/clusterHostInfo/namenode_host", None)
+_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
+_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
+_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
+_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
+_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
+_rm_host = default("/clusterHostInfo/rm_host", None)
+_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
+_hs_host = default("/clusterHostInfo/hs_host", None)
+_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
+_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
+_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
+_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
+
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
+_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
+_oozie_server = default("/clusterHostInfo/oozie_server",None)
+_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
+# can differ on HDP2
+_mapred_tt_hosts = _slave_hosts
+# if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
+_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
+_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
+all_hosts = config['clusterHostInfo']['all_hosts']
+
+
+hostgroup_defs = {
+    'namenode' : namenode_host,
+    'snamenode' : _snamenode_host,
+    'slaves' : _slave_hosts,
+    # not in HDP2
+    'tasktracker-servers' : _mapred_tt_hosts,
+    'agent-servers' : all_hosts,
+    'nagios-server' : _nagios_server_host,
+    'jobtracker' : _jtnode_host,
+    'ganglia-server' : _ganglia_server_host,
+    'flume-servers' : _flume_hosts,
+    'zookeeper-servers' : _zookeeper_hosts,
+    'hbasemasters' : hbase_master_hosts,
+    'hiveserver' : _hive_server_host,
+    'region-servers' : _hbase_rs_hosts,
+    'oozie-server' : _oozie_server,
+    'webhcat-server' : _webhcat_server_host,
+    'hue-server' : _hue_server_host,
+    'resourcemanager' : _rm_host,
+    'nodemanagers' : _nm_hosts,
+    'historyserver2' : _hs_host,
+    'journalnodes' : _journalnode_hosts
+}

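The HA detection above simply counts comma-separated NameNode ids from hdfs-site; a quick trace with a hypothetical value:

    >>> dfs_ha_namenode_ids = "nn1,nn2"           # hypothetical hdfs-site value
    >>> len(dfs_ha_namenode_ids.split(",")) > 1   # becomes dfs_ha_enabled
    True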
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py
new file mode 100644
index 0000000..33b35fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+nagios_pid_dir = "/var/run/nagios"
+nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2
new file mode 100644
index 0000000..9dada51
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/contacts.cfg.j2
@@ -0,0 +1,91 @@
+###############################################################################
+# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
+#
+# Last Modified: 05-31-2007
+#
+# NOTES: This config file provides you with some example contact and contact
+#        group definitions that you can reference in host and service
+#        definitions.
+#       
+#        You don't need to keep these definitions in a separate file from your
+#        other object definitions.  This has been done just to make things
+#        easier to understand.
+#
+###############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+
+###############################################################################
+###############################################################################
+#
+# CONTACTS
+#
+###############################################################################
+###############################################################################
+
+# Just one contact defined by default - the Nagios admin (that's you)
+# This contact definition inherits a lot of default values from the 'generic-contact' 
+# template which is defined elsewhere.
+
+define contact{
+        contact_name    {{nagios_web_login}}                                        ; Short name of user
+        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
+        alias           Nagios Admin                                                ; Full name of user
+
+        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
+        }
+
+# Contact which writes all Nagios alerts to the system logger.
+define contact{
+        contact_name                    sys_logger         ; Short name of user
+        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
+        alias                           System Logger      ; Full name of user
+        host_notifications_enabled      1
+        service_notifications_enabled   1
+        service_notification_period     24x7
+        host_notification_period        24x7
+        service_notification_options    w,u,c,r,s
+        host_notification_options       d,u,r,s
+        can_submit_commands             1
+        retain_status_information       1
+        service_notification_commands   service_sys_logger
+        host_notification_commands      host_sys_logger
+        }
+
+###############################################################################
+###############################################################################
+#
+# CONTACT GROUPS
+#
+###############################################################################
+###############################################################################
+
+# Both contacts defined above are members of a single contact group, so
+# there is no need to create more than one contact group.
+
+define contactgroup {
+        contactgroup_name       admins
+        alias                   Nagios Administrators
+        members                 {{nagios_web_login}},sys_logger
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
new file mode 100644
index 0000000..99870d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
@@ -0,0 +1,114 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% if env.system.os_family != "suse" %}
+# 'check_cpu' check remote cpu load
+define command {
+        command_name    check_cpu
+        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
+       }
+{% endif %}
+
+# Check data node storage full 
+define command {
+        command_name    check_datanode_storage
+        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
+       }
+
+define command{
+        command_name    check_hdfs_blocks
+        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
+       }
+
+define command{
+        command_name    check_hdfs_capacity
+        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_aggregate
+        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_rpcq_latency
+        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_nagios
+        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
+       }
+
+define command{
+        command_name    check_webui
+        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
+       }
+
+define command{
+        command_name    check_name_dir_status
+        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
+       }
+
+define command{
+        command_name    check_oozie_status
+        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_templeton_status
+        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_hive_metastore_status
+        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+define command{
+        command_name    check_hue_status
+        command_line    $USER1$/check_hue_status.sh
+       }
+
+define command{
+       command_name    check_mapred_local_dir_used_space
+       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
+       }
+
+define command{
+       command_name    check_namenodes_ha
+       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
+       }
+
+define command{
+        command_name    check_nodemanager_health
+        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
+       }
+
+define command{
+        command_name    host_sys_logger
+        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
+       }
+
+define command{
+        command_name    service_sys_logger
+        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
+       }
\ No newline at end of file

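Each command above receives its $ARGn$ macros from the service definitions in hadoop-services.cfg, where arguments are '!'-separated in check_command. A hypothetical service entry for illustration (host and port are placeholders):

    define service {
            host_name             jt.example.com
            service_description   JOBTRACKER::JobTracker Web UI
            check_command         check_webui!jobtracker!50030
    }

Here "jobtracker" becomes $ARG1$ and "50030" becomes $ARG2$ of check_webui.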
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
new file mode 100644
index 0000000..d24e5cd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+{% for name, hosts in hostgroup_defs.iteritems() %}
+{% if hosts %}
+define hostgroup {
+        hostgroup_name  {{name}}
+        alias           {{name}}
+        members         {{','.join(hosts)}}
+}
+{% endif %}
+{% endfor %}
+
+define hostgroup {
+        hostgroup_name  all-servers
+        alias           All Servers
+        members         {{','.join(all_hosts)}}
+}
\ No newline at end of file

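To see what this template produces, a cut-down copy of it can be rendered standalone with made-up inputs (this assumes the jinja2 package; the real render is driven by Ambari's TemplateConfig resource, and the template above uses Python 2's iteritems() where the sketch uses items()):

from jinja2 import Template

# Made-up inputs standing in for the values Ambari passes to the template.
hostgroup_defs = {
    'namenode': ['c6401.ambari.apache.org'],
    'slaves': ['c6402.ambari.apache.org', 'c6403.ambari.apache.org'],
    'hue-server': [],  # empty groups are skipped by the {% if hosts %} guard
}

template_text = (
    "{% for name, hosts in hostgroup_defs.items() %}\n"
    "{% if hosts %}\n"
    "define hostgroup {\n"
    "        hostgroup_name  {{name}}\n"
    "        members         {{','.join(hosts)}}\n"
    "}\n"
    "{% endif %}\n"
    "{% endfor %}\n"
)
print(Template(template_text).render(hostgroup_defs=hostgroup_defs))
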
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
new file mode 100644
index 0000000..778e4f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+{% for host in all_hosts %}
+define host {
+        alias        {{host}}
+        host_name    {{host}}
+        use          linux-server
+        address      {{host}}
+        check_interval         0.25
+        retry_interval         0.25
+        max_check_attempts     4
+        notifications_enabled     1
+        first_notification_delay  0     # Send notification soon after change in the hard state
+        notification_interval     0     # Send the notification once
+        notification_options      d,u,r
+}
+
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
new file mode 100644
index 0000000..233051f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
@@ -0,0 +1,98 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+{% if hostgroup_defs['namenode'] or
+  hostgroup_defs['snamenode']  or
+  hostgroup_defs['slaves'] %}
+define servicegroup {
+  servicegroup_name  HDFS
+  alias  HDFS Checks
+}
+{% endif %}
+{% if hostgroup_defs['jobtracker'] or
+  hostgroup_defs['historyserver2'] %}
+define servicegroup {
+  servicegroup_name  MAPREDUCE
+  alias  MAPREDUCE Checks
+}
+{% endif %}
+{% if hostgroup_defs['resourcemanager'] or
+  hostgroup_defs['nodemanagers'] %}
+define servicegroup {
+  servicegroup_name  YARN
+  alias  YARN Checks
+}
+{% endif %}
+{% if hostgroup_defs['flume-servers'] %}
+define servicegroup {
+  servicegroup_name  FLUME
+  alias  FLUME Checks
+}
+{% endif %}
+{% if hostgroup_defs['hbasemasters'] %}
+define servicegroup {
+  servicegroup_name  HBASE
+  alias  HBASE Checks
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+define servicegroup {
+  servicegroup_name  OOZIE
+  alias  OOZIE Checks
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+define servicegroup {
+  servicegroup_name  WEBHCAT
+  alias  WEBHCAT Checks
+}
+{% endif %}
+{% if hostgroup_defs['nagios-server'] %}
+define servicegroup {
+  servicegroup_name  NAGIOS
+  alias  NAGIOS Checks
+}
+{% endif %}
+{% if hostgroup_defs['ganglia-server'] %}
+define servicegroup {
+  servicegroup_name  GANGLIA
+  alias  GANGLIA Checks
+}
+{% endif %}
+{% if hostgroup_defs['hiveserver'] %}
+define servicegroup {
+  servicegroup_name  HIVE-METASTORE
+  alias  HIVE-METASTORE Checks
+}
+{% endif %}
+{% if hostgroup_defs['zookeeper-servers'] %}
+define servicegroup {
+  servicegroup_name  ZOOKEEPER
+  alias  ZOOKEEPER Checks
+}
+{% endif %}
+define servicegroup {
+  servicegroup_name  AMBARI
+  alias  AMBARI Checks
+}
+{% if hostgroup_defs['hue-server'] %}
+define servicegroup {
+  servicegroup_name  HUE
+  alias  HUE Checks
+}
+{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2
new file mode 100644
index 0000000..793732e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2
@@ -0,0 +1,714 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+{# TODO: Look for { or } in created file #}
+# NAGIOS SERVER Check (status log update)
+{% if hostgroup_defs['nagios-server'] %}
+define service {
+        name                            hadoop-service
+        use                             generic-service
+        notification_options            w,u,c,r,f,s
+        first_notification_delay        0
+        notification_interval           0                 # Send the notification once
+        contact_groups                  admins
+        notifications_enabled           1
+        event_handler_enabled           1
+        register                        0
+}
+
+define service {        
+        hostgroup_name          nagios-server        
+        use                     hadoop-service
+        service_description     NAGIOS::Nagios status log freshness
+        servicegroups           NAGIOS
+        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
+        normal_check_interval   5
+        retry_check_interval    0.5
+        max_check_attempts      2
+}
+
+# NAGIOS SERVER HDFS Checks
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes with space available
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+# AMBARI AGENT Checks
+{% for hostname in all_hosts %}
+define service {
+        host_name	        {{ hostname }}
+        use                     hadoop-service
+        service_description     AMBARI::Ambari Agent process
+        servicegroups           AMBARI
+        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% endfor %}
+
+# NAGIOS SERVER ZOOKEEPER Checks
+{% if hostgroup_defs['zookeeper-servers'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
+        servicegroups           ZOOKEEPER
+        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+# NAGIOS SERVER HBASE Checks
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HBASE::Percent RegionServers live
+        servicegroups           HBASE
+        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+
+
+# GANGLIA SERVER Checks
+{% if hostgroup_defs['ganglia-server'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Server process
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for NameNode
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% if hostgroup_defs['jobtracker'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for JobTracker
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HBase Master
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['resourcemanager'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% endif %}
+
+{% if hostgroup_defs['snamenode'] %}
+# Secondary namenode checks
+define service {
+        hostgroup_name          snamenode
+        use                     hadoop-service
+        service_description     NAMENODE::Secondary NameNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['namenode'] %}
+# HDFS Checks
+{%  for namenode_hostname in namenode_host %}
+{# TODO: check if we can get rid of str, lower #}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if env.system.os_family != "suse" %}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_webui!namenode!{{ namenode_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      5
+}
+
+{%  endfor  %}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Blocks health
+        servicegroups           HDFS
+        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::HDFS capacity utilization
+        servicegroups           HDFS
+        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   10
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+{% endif %}
+
+# MAPREDUCE Checks
+{% if hostgroup_defs['jobtracker'] %}
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::JobTracker Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!jobtracker!{{ jtnode_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::HistoryServer Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!jobhistory!{{ jobhistory_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+{% if env.system.os_family != "suse" %}
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::JobTracker CPU utilization
+        servicegroups           MAPREDUCE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::JobTracker process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ jtnode_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     MAPREDUCE::JobTracker RPC latency
+        servicegroups           MAPREDUCE
+        check_command           check_rpcq_latency!JobTracker!{{ jtnode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+{% if hostgroup_defs['tasktracker-servers'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     MAPREDUCE::Percent TaskTrackers live
+        servicegroups           MAPREDUCE
+        check_command           check_aggregate!"TASKTRACKER::TaskTracker process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+# MAPREDUCE::TASKTRACKER Checks 
+define service {
+        hostgroup_name          tasktracker-servers
+        use                     hadoop-service
+        service_description     TASKTRACKER::TaskTracker process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ tasktracker_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+# MAPREDUCE::TASKTRACKER Mapreduce local dir used space
+define service {
+        hostgroup_name          tasktracker-servers
+        use                     hadoop-service
+        service_description     TASKTRACKER::MapReduce local dir space
+        servicegroups           MAPREDUCE
+        check_command           check_mapred_local_dir_used_space!{{ mapred_local_dir }}!85%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+{% endif %}
+{% endif %}
+
+{% if hostgroup_defs['resourcemanager'] %}
+# YARN::RESOURCEMANAGER Checks 
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager Web UI
+        servicegroups           YARN
+        check_command           check_webui!resourcemanager!{{ rm_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.os_family != "suse" %}
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
+        servicegroups           YARN
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager RPC latency
+        servicegroups           YARN
+        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['nodemanagers'] %}
+# YARN::NODEMANAGER Checks
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager health
+        servicegroups           YARN
+        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     NODEMANAGER::Percent NodeManagers live
+        servicegroups           YARN
+        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+# MAPREDUCE::JOBHISTORY Checks
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!historyserver2!{{ hs_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.os_family != "suse" %}
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer CPU utilization
+        servicegroups           MAPREDUCE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{%  endif %}
+
+{% if hostgroup_defs['journalnodes'] %}
+# Journalnode checks
+define service {
+        hostgroup_name          journalnodes
+        use                     hadoop-service
+        service_description     JOURNALNODE::JournalNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if dfs_ha_enabled %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent JournalNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+{% if hostgroup_defs['slaves'] %}
+# HDFS::DATANODE Checks
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode space
+        servicegroups           HDFS
+        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      2
+}
+
+{% endif %}
+
+{% if hostgroup_defs['flume-servers'] %}
+# FLUME Checks
+define service {
+        hostgroup_name          flume-servers
+        use                     hadoop-service
+        service_description     FLUME::Flume Agent process
+        servicegroups           FLUME
+        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['zookeeper-servers'] %}
+# ZOOKEEPER Checks
+define service {
+        hostgroup_name          zookeeper-servers
+        use                     hadoop-service
+        service_description     ZOOKEEPER::ZooKeeper Server process
+        servicegroups           ZOOKEEPER
+        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+# HBASE::REGIONSERVER Checks
+define service {
+        hostgroup_name          region-servers
+        use                     hadoop-service
+        service_description     REGIONSERVER::RegionServer process
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{# HBASE:: MASTER Checks
+# define service {
+#         hostgroup_name          hbasemasters
+#         use                     hadoop-service
+#         service_description     HBASEMASTER::HBase Master Web UI
+#         servicegroups           HBASE
+#         check_command           check_webui!hbase!{{ hbase_master_port }}
+#         normal_check_interval   1
+#         retry_check_interval    1
+#         max_check_attempts      3
+# #}
+{%  for hbasemaster in hbase_master_hosts  %}
+{% if env.system.os_family != "suse" %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endfor %}
+{% endif %}
+
+{% if hostgroup_defs['hiveserver'] %}
+# HIVE Metastore check
+define service {
+        hostgroup_name          hiveserver
+        use                     hadoop-service
+        service_description     HIVE-METASTORE::Hive Metastore status
+        servicegroups           HIVE-METASTORE
+        {% if security_enabled %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+# Oozie check
+define service {
+        hostgroup_name          oozie-server
+        use                     hadoop-service
+        service_description     OOZIE::Oozie Server status
+        servicegroups           OOZIE
+        {% if security_enabled %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+# WEBHCAT check
+define service {
+        hostgroup_name          webhcat-server
+        use                     hadoop-service
+        service_description     WEBHCAT::WebHCat Server status
+        servicegroups           WEBHCAT 
+        {% if security_enabled %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hue-server'] %}
+define service {
+        hostgroup_name          hue-server
+        use                     hadoop-service
+        service_description     HUE::Hue Server status
+        servicegroups           HUE
+        check_command           check_hue_status
+        normal_check_interval   100
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+

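Many of the nagios-server checks above call check_aggregate with warn/critical percentages (for example !10%!30% on "Percent DataNodes live"). The plugin itself is check_aggregate.php parsing /var/nagios/status.dat; its threshold arithmetic amounts to roughly this hedged sketch:

def aggregate_state(total, down, warn_pct, crit_pct):
    # Sketch only: the real logic lives in check_aggregate.php, which also
    # counts the matching service instances out of status.dat.
    if total == 0:
        return 'OK'
    pct_down = 100.0 * down / total
    if pct_down >= crit_pct:
        return 'CRITICAL'
    if pct_down >= warn_pct:
        return 'WARNING'
    return 'OK'

# 3 of 20 DataNodes down is 15%: past the 10% warn mark, under 30% critical.
print(aggregate_state(total=20, down=3, warn_pct=10, crit_pct=30))
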

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
new file mode 100644
index 0000000..c44fcf4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+pid_file = format("{oozie_pid_dir}/oozie.pid")

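format() here is resource_management's helper, which resolves names from the caller's scope; with an explicit mapping, plain str.format gives the same result (the value below is an example, not a default):

# Stdlib-only equivalent of the substitution above; resource_management's
# format() finds oozie_pid_dir in the enclosing scope automatically.
oozie_pid_dir = '/var/run/oozie'  # example value, normally read from config
pid_file = '{oozie_pid_dir}/oozie.pid'.format(oozie_pid_dir=oozie_pid_dir)
assert pid_file == '/var/run/oozie/oozie.pid'
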
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
new file mode 100644
index 0000000..270a1a8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#      http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+# export OOZIE_HTTP_PORT=11000
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

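CATALINA_PID above is the same {{pid_file}} computed in status_params.py, which is what makes a pid-based liveness probe possible. A sketch of such a probe, not the agent's actual status implementation:

import os

def pid_file_alive(pid_file):
    # Read the pid and send signal 0, which only tests process existence.
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (IOError, OSError, ValueError):
        return False

print(pid_file_alive('/var/run/oozie/oozie.pid'))  # example path
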
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-log4j.xml
new file mode 100644
index 0000000..a0db719
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-log4j.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>log4j.logger.org.apache.pig</name>
+    <value>info, A</value>
+  </property>
+  <property>
+    <name>log4j.appender.A</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.A.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.A.layout.ConversionPattern</name>
+    <value>%-4r [%t] %-5p %c %x - %m%n</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
index 8efacb4..2b66bbf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
@@ -16,15 +16,47 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.11.1.1.3.2.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.11.1.1.3.3.0</version>
+      <components>
         <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
+          <name>PIG</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>pig-log4j</config-type>
+      </configuration-dependencies>
 
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/files/pigSmoke.sh
new file mode 100644
index 0000000..a22456e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
new file mode 100644
index 0000000..781e635
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+
+pig_conf_dir = "/etc/pig/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hdfs_user = config['configurations']['global']['hdfs_user']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_home = "/usr"
+
+#log4j.properties
+if ('pig-log4j' in config['configurations']):
+  log4j_props = config['configurations']['pig-log4j']
+else:
+  log4j_props = None
\ No newline at end of file

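The pig-log4j branch above is the usual optional-config-section pattern; on a plain dict, .get() is the compact standalone equivalent:

# Standalone sketch with a toy payload: .get() returns the section when
# present and None otherwise, matching the if/else above.
config = {'configurations': {'global': {'hdfs_user': 'hdfs'}}}
log4j_props = config['configurations'].get('pig-log4j')
print(log4j_props)  # None here, since the toy payload has no pig-log4j section
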
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
new file mode 100644
index 0000000..0450d5e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management import *
+
+def pig():
+  import params
+
+  Directory( params.pig_conf_dir,
+    owner = params.hdfs_user,
+    group = params.user_group
+  )
+
+  pig_TemplateConfig( ['pig-env.sh','pig.properties'])
+
+  if (params.log4j_props != None):
+    PropertiesFile('log4j.properties',
+                   dir=params.pig_conf_dir,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hdfs_user,
+                   group=params.user_group,
+    )
+  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
+    File(format("{params.pig_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hdfs_user
+    )
+  
+  
+def pig_TemplateConfig(name):
+  import params
+  
+  if not isinstance(name, list):
+    name = [name]
+    
+  for x in name:
+    TemplateConfig( format("{pig_conf_dir}/{x}"),
+        owner = params.hdfs_user
+    )
+  

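pig_TemplateConfig's single-name-or-list handling can be exercised on its own; in this minimal stand-in a print replaces the real TemplateConfig resource:

def render_templates(name, conf_dir='/etc/pig/conf'):
    # Accept one template name or a list of them, as pig_TemplateConfig does.
    if not isinstance(name, list):
        name = [name]
    for x in name:
        print('would render %s/%s' % (conf_dir, x))

render_templates('pig-env.sh')
render_templates(['pig-env.sh', 'pig.properties'])
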
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig_client.py
new file mode 100644
index 0000000..acd0cb1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from pig import pig
+
+         
+class PigClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    pig()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/service_check.py
new file mode 100644
index 0000000..3cca087
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/service_check.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class PigServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    input_file = 'passwd'
+    output_file = "pigsmoke.out"
+  
+    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
+    #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
+    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+    test_cmd = format("fs -test -e {output_file}")
+  
+    ExecuteHadoop( create_file_cmd,
+      tries     = 3,
+      try_sleep = 5,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+  
+    File( '/tmp/pigSmoke.sh',
+      content = StaticFile("pigSmoke.sh"),
+      mode = 0755
+    )
+  
+    Execute( "pig /tmp/pigSmoke.sh",
+      tries     = 3,
+      try_sleep = 5,
+      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      user      = params.smokeuser,
+      logoutput = True
+    )
+  
+    ExecuteHadoop( test_cmd,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigServiceCheck().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigServiceCheck().execute()
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig-env.sh.j2
new file mode 100644
index 0000000..b0e17d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig-env.sh.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig.properties.j2
new file mode 100644
index 0000000..6fcb233
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig.properties.j2
@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Pig configuration file. All values can be overwritten by command line arguments.
+
+# log4jconf log4j configuration file
+# log4jconf=./conf/log4j.properties
+
+# a file that contains pig script
+#file=
+
+# load jarfile, colon separated
+#jar=
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=local
+
+#pig.logfile=
+
+#Do not spill temp files smaller than this size (bytes)
+#pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+#pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+#pig.exec.reducers.bytes.per.reducer=1000000000
+#pig.exec.reducers.max=999
+
+#Use this option only when your Pig job will otherwise die because of
+#using more counter than hadoop configured limit
+#pig.disable.counter=true
+hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/metainfo.xml
index 1f03a35..426bb25 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/metainfo.xml
@@ -16,15 +16,62 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.3.1.3.2.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.3.1.3.3.0</version>
 
-    <components>
+      <components>
         <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
+          <name>SQOOP</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>sqoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/__init__.py
new file mode 100644
index 0000000..3860581
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py
new file mode 100644
index 0000000..21a39d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+security_enabled = config['configurations']['global']['security_enabled']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+sqoop_conf_dir = "/usr/lib/sqoop/conf"
+hbase_home = "/usr"
+hive_home = "/usr"
+zoo_conf_dir = "/etc/zookeeper"
+sqoop_lib = "/usr/lib/sqoop/lib"
+sqoop_user = "sqoop"
+
+keytab_path = config['configurations']['global']['keytab_path']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
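get_kinit_path receives an ordered candidate list (a configured override first, then common system locations) and is expected to return the first usable kinit. A rough standalone equivalent, assuming that resolution order (find_kinit is illustrative, not the real implementation):

    import os

    def find_kinit(candidates):
        # return the first candidate directory that actually contains kinit
        for directory in candidates:
            if directory and os.path.isfile(os.path.join(directory, "kinit")):
                return os.path.join(directory, "kinit")
        return "kinit"  # fall back to whatever PATH resolves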

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/service_check.py
new file mode 100644
index 0000000..b872be6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/service_check.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+from resource_management import *
+
+
+class SqoopServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}"))
+    Execute("sqoop version",
+            user = params.smokeuser,
+            logoutput = True
+    )
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop.py
new file mode 100644
index 0000000..492550e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop.py
@@ -0,0 +1,51 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import sys
+
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  )
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group
+  )
+  sqoop_TemplateConfig("sqoop-env.sh")
+  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
+          owner = params.sqoop_user,
+          group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+
+def sqoop_TemplateConfig(name, tag=None):
+  import params
+  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
+                  owner = params.sqoop_user,
+                  template_tag = tag
+  )
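sqoop_TemplateConfig is a thin wrapper that renders templates/<name>.j2 into the conf dir with sqoop ownership, so additional templates can be added with one call. A hedged example (the template name below is hypothetical):

    # would render templates/sqoop-metrics.properties.j2 into
    # /usr/lib/sqoop/conf/sqoop-metrics.properties, owned by the sqoop user
    sqoop_TemplateConfig("sqoop-metrics.properties")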

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop_client.py
new file mode 100644
index 0000000..bd2863c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop_client.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+from resource_management import *
+
+from sqoop import sqoop
+
+
+class SqoopClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/templates/sqoop-env.sh.j2
new file mode 100644
index 0000000..90cbc75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/templates/sqoop-env.sh.j2
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
+
+#Set the path to where the zookeeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml
index d88013f..d6c2a1f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml
@@ -16,20 +16,82 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>This is comment for WEBHCAT service</comment>
-    <version>0.11.0.1.3.2.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>WEBHCAT</name>
+      <comment>This is a comment for the WEBHCAT service</comment>
+      <version>0.11.0.1.3.3.0</version>
+      <components>
         <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
+          <name>WEBHCAT_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>WEBHCAT/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-    <config-type>webhcat-site</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <configuration-dependencies>
+        <config-type>webhcat-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/files/templetonSmoke.sh
new file mode 100644
index 0000000..cefc4f0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/files/templetonSmoke.sh
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+  kinitcmd=""
+fi
+
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
+
+#try hcat ddl command
+echo "user.name=${smoke_test_user}&exec=show databases;" > /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@/tmp/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# the pig smoke test below is not run in secure mode
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+
+#create, copy post args file
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
+
+#submit pig query
+cmd="curl -s -w 'http_code <%{http_code}>' -d  \@/tmp/pig_post.txt  $ttonurl/pig 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py
new file mode 100644
index 0000000..83211e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+webhcat_user = config['configurations']['global']['webhcat_user']
+download_url = config['configurations']['global']['apache_artifacts_download_url']
+
+config_dir = '/etc/hcatalog/conf'
+
+templeton_log_dir = config['configurations']['global']['hcat_log_dir']
+templeton_pid_dir = status_params.templeton_pid_dir
+
+pid_file = status_params.pid_file
+
+hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+hadoop_home = '/usr'
+user_group = config['configurations']['global']['user_group']
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/service_check.py
new file mode 100644
index 0000000..58b4d25
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/service_check.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+class WebHCatServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File('/tmp/templetonSmoke.sh',
+         content= StaticFile('templetonSmoke.sh'),
+         mode=0755
+    )
+
+    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+                 " {security_enabled} {kinit_path_local}",
+                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True)
+
+if __name__ == "__main__":
+  WebHCatServiceCheck().execute()
\ No newline at end of file
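A detail the cmd construction above depends on: resource_management's format() resolves placeholder names from the caller's scope and params by default, but explicit keyword arguments take precedence, which is how smokeuser_keytab becomes the literal "no_keytab" when security is off. A minimal sketch of that precedence, assuming format() keeps its kwargs-over-scope behavior (values are made up):

    from resource_management import *

    smokeuser_keytab = "/etc/security/keytabs/smokeuser.headless.keytab"
    # the keyword argument wins over the name of the same spelling in scope,
    # so cmd ends up as "kt=no_keytab"
    cmd = format("kt={smokeuser_keytab}", smokeuser_keytab="no_keytab")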

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/status_params.py
new file mode 100644
index 0000000..21dde6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
+pid_file = format('{templeton_pid_dir}/webhcat.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat.py
new file mode 100644
index 0000000..ae12f54
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import sys
+
+
+def webhcat():
+  import params
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.config_dir,
+            owner=params.webhcat_user,
+            group=params.user_group)
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+  )
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=Template('webhcat-env.sh.j2')
+  )
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  copyFromLocal(path='/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
+                kinit_if_needed=kinit_if_needed
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
+  )
+
+
+def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinit_if_needed=""):
+  import params
+
+  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
+  unless_cmd = format("{kinit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
+
+  # only copy if the destination does not already exist
+  ExecuteHadoop(copy_cmd,
+                not_if=unless_cmd,
+                user=owner,
+                conf_dir=params.hadoop_conf_dir)
+
+  if not owner:
+    chown = None
+  elif not group:
+    chown = owner
+  else:
+    chown = format('{owner}:{group}')
+
+  if chown:
+    chown_cmd = format("fs -chown {chown} {dest_dir}")
+
+    ExecuteHadoop(chown_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)
+
+  if mode:
+    chmod_cmd = format('fs -chmod {mode} {dest_dir}')
+
+    ExecuteHadoop(chmod_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)
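With the chown/chmod branches above corrected to fire when the attributes are supplied, a caller wanting group ownership and permissions applied end-to-end would pass all of them; a sketch mirroring the existing calls (the group argument is the addition):

    copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
                  owner=params.webhcat_user,
                  group=params.user_group,
                  mode=0755,
                  dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"))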

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py
new file mode 100644
index 0000000..4365111
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+
+class WebHCatServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    webhcat_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+
+if __name__ == "__main__":
+  WebHCatServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_service.py
new file mode 100644
index 0000000..12c3854
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_service.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+def webhcat_service(action='start'):
+  import params
+
+  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
+
+  if action == 'start':
+    daemon_cmd = format('{cmd} start')
+    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.webhcat_user,
+            not_if=no_op_test
+    )
+  elif action == 'stop':
+    daemon_cmd = format('{cmd} stop')
+    Execute(daemon_cmd,
+            user=params.webhcat_user
+    )
+    Execute(format('rm -f {pid_file}'))
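The no_op_test above is what makes start idempotent: the daemon command is skipped when the pid file exists and the recorded process is still alive. The same check in plain Python, for illustration (is_running is not part of resource_management):

    import os

    def is_running(pid_file):
        # mirrors: ls pid_file && ps `cat pid_file`
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes existence without killing
            return True
        except (IOError, OSError, ValueError):
            return False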

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/templates/webhcat-env.sh.j2
new file mode 100644
index 0000000..9ea4a79
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/templates/webhcat-env.sh.j2
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The file containing the running pid
+PID_FILE={{pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=/usr/lib/hadoop

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
new file mode 100644
index 0000000..8fb48b9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>log4j.appender.CONSOLE</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.CONSOLE.Threshold</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.appender.CONSOLE.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.CONSOLE.layout.ConversionPattern</name>
+    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.Threshold</name>
+    <value>DEBUG</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.File</name>
+    <value>zookeeper.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.MaxFileSize</name>
+    <value>10MB</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.layout.ConversionPattern</name>
+    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE</name>
+    <value>org.apache.log4j.FileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.Threshold</name>
+    <value>TRACE</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.File</name>
+    <value>zookeeper_trace.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.layout.ConversionPattern</name>
+    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/metainfo.xml
index f14ac66..07817b1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/metainfo.xml
@@ -16,24 +16,58 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.1.3.2.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.5.1.3.3.0</version>
+      <components>
 
-    <components>
         <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
+          <name>ZOOKEEPER_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
+          <name>ZOOKEEPER_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
+      </components>
 
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
 
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>zookeeper-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkEnv.sh
new file mode 100644
index 0000000..07017e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkServer.sh
new file mode 100644
index 0000000..49ceb4d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkServer.sh
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+    JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ]
+then
+    echo "JMX enabled by default"
+    # for some reason these two options are necessary on jdk6 on Ubuntu
+    #   according to the docs they are not necessary, but otherwise jconsole cannot
+    #   do a local attach
+    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+else
+    echo "JMX disabled by user request"
+    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+# Only follow symlinks if readlink supports it
+if readlink -f "$0" > /dev/null 2>&1
+then
+  ZOOBIN=`readlink -f "$0"`
+else
+  ZOOBIN="$0"
+fi
+ZOOBINDIR=`dirname "$ZOOBIN"`
+
+. "$ZOOBINDIR"/zkEnv.sh
+
+if [ "x$2" != "x" ]
+then
+    ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+if $cygwin
+then
+    ZOOCFG=`cygpath -wp "$ZOOCFG"`
+    # cygwin has a "kill" in the shell itself, gets confused
+    KILL=/bin/kill
+else
+    KILL=kill
+fi
+
+echo "Using config: $ZOOCFG"
+
+ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
+
+
+case $1 in
+start)
+    echo  "Starting zookeeper ... "
+    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
+    /bin/echo -n $! > "$ZOOPIDFILE"
+    echo STARTED
+    ;;
+stop)
+    echo "Stopping zookeeper ... "
+    if [ ! -f "$ZOOPIDFILE" ]
+    then
+        echo "error: could not find file $ZOOPIDFILE"
+        exit 1
+    else
+        $KILL -9 $(cat "$ZOOPIDFILE")
+        rm "$ZOOPIDFILE"
+        echo STOPPED
+    fi
+    ;;
+upgrade)
+    shift
+    echo "upgrading the servers to 3.*"
+    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+    echo "Upgrading ... "
+    ;;
+restart)
+    shift
+    "$0" stop ${@}
+    sleep 3
+    "$0" start ${@}
+    ;;
+status)
+    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
+    if [ "x$STAT" = "x" ]
+    then
+        echo "Error contacting service. It is probably not running."
+    else
+        echo $STAT
+    fi
+    ;;
+*)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+
+esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkService.sh
new file mode 100644
index 0000000..32dfce4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkService.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zkcli_script=$1
+user=$2
+conf_dir=$3
+su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"
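+# Example invocation (hypothetical values):
+#   sh zkService.sh /usr/lib/zookeeper/bin/zkCli.sh zookeeper /etc/zookeeper/conf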

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkSmoke.sh
new file mode 100644
index 0000000..c1c11b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/files/zkSmoke.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smoke_script=$1
+smoke_user=$2
+conf_dir=$3
+client_port=$4
+security_enabled=$5
+kinit_path_local=$6
+smoke_user_keytab=$7
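+# Example invocation (hypothetical values):
+#   sh zkSmoke.sh /usr/lib/zookeeper/bin/zkCli.sh ambari-qa /etc/zookeeper/conf \
+#     2181 False /usr/bin/kinit /etc/security/keytabs/smokeuser.headless.keytab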
+export ZOOKEEPER_EXIT_CODE=0
+test_output_file=/tmp/zkSmoke.out
+errors_expr="ERROR|Exception"
+acceptable_expr="SecurityException"
+zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
+zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
+echo "zk_node1=$zk_node1"
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
+  su - $smoke_user -c "$kinitcmd"
+fi
+
+function verify_output() {
+  if [ -f $test_output_file ]; then
+    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
+    if [ "$?" -eq 0 ]; then
+      echo "Error found in the zookeeper smoke test. Exiting."
+      echo $errors
+      exit 1
+    fi
+  fi
+}
+
+# Delete /zk_smoketest znode if exists
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" >$test_output_file 2>&1
+# Create /zk_smoketest znode on one zookeeper server
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" >>$test_output_file 2>&1
+verify_output
+
+for i in $zkhosts ; do
+  echo "Running test on host $i"
+  # Verify the data associated with znode across all the nodes in the zookeeper quorum
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
+  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
+  echo $output | grep smoke_data
+  if [[ $? -ne 0 ]] ; then
+    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
+    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
+  fi
+done
+
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
+if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
+  echo "Zookeeper Smoke Test: Failed" 
+else
+   echo "Zookeeper Smoke Test: Passed" 
+fi
+exit $ZOOKEEPER_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py
new file mode 100644
index 0000000..3c02248
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+config_dir = "/etc/zookeeper/conf"
+zk_user = config['configurations']['global']['zk_user']
+hostname = config['hostname']
+zk_bin = '/usr/lib/zookeeper/bin'
+user_group = config['configurations']['global']['user_group']
+
+smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+
+zk_log_dir = config['configurations']['global']['zk_log_dir']
+zk_data_dir = config['configurations']['global']['zk_data_dir']
+zk_pid_dir = status_params.zk_pid_dir
+zk_pid_file = status_params.zk_pid_file
+zk_server_heapsize = "-Xmx1024m"
+
+tickTime = config['configurations']['global']['tickTime']
+initLimit = config['configurations']['global']['initLimit']
+syncLimit = config['configurations']['global']['syncLimit']
+clientPort = config['configurations']['global']['clientPort']
+
+if 'zoo.cfg' in config['configurations']:
+  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+  zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zk_primary_name = "zookeeper"
+zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
+zk_principal = zk_principal_name.replace('_HOST', hostname)
+
+java64_home = config['hostLevelParams']['java_home']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+keytab_path = "/etc/security/keytabs"
+zk_keytab_path = format("{keytab_path}/zk.service.keytab")
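+# format() interpolates {names} from the enclosing scope, so the line above
+# resolves to "/etc/security/keytabs/zk.service.keytab"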
+zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
+zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
+security_enabled = config['configurations']['global']['security_enabled']
+
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
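+# get_kinit_path picks the first of the given locations that actually holds
+# a kinit binary; the leading default() lookup allows an explicit override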
+
+#log4j.properties
+if ('zookeeper-log4j' in config['configurations']):
+  log4j_props = config['configurations']['zookeeper-log4j']
+else:
+  log4j_props = None


[44/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2
new file mode 100644
index 0000000..acb2522
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.cfg.j2
@@ -0,0 +1,1349 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
+#
+# Read the documentation for more information on this configuration
+# file.  I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+# Last Modified: 12-14-2008
+#
+##############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes.  This should be the first option specified 
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/contacts.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# Definitions for monitoring the local (Linux) host
+#cfg_file=/etc/nagios/objects/localhost.cfg
+
+# Definitions for monitoring a Windows machine
+#cfg_file=/etc/nagios/objects/windows.cfg
+
+# Definitions for monitoring a router/switch
+#cfg_file=/etc/nagios/objects/switch.cfg
+
+# Definitions for monitoring a network printer
+#cfg_file=/etc/nagios/objects/printer.cfg
+
+# Definitions for hadoop servers
+cfg_file={{nagios_host_cfg}}
+cfg_file={{nagios_hostgroup_cfg}}
+cfg_file={{nagios_servicegroup_cfg}}
+cfg_file={{nagios_service_cfg}}
+cfg_file={{nagios_command_cfg}}
+
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts.  The CGIs read object definitions from 
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file.  You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start 
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions.  The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file={{nagios_resource_cfg}}
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored.  Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and 
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.  
+# You can either supply a username or a UID.
+
+nagios_user={{nagios_user}}
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.  
+# You can either supply a group name or a GID.
+
+nagios_group={{nagios_group}}
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).  By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side.  If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later.  If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute.  If you specify a
+# number followed by an "s" (e.g. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly 
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody').  Permissions should be set at the 
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/nagios/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming 
+# external commands before they are processed.  As external commands 
+# are processed by the daemon, they are removed from the buffer.  
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file={{nagios_pid_file}}
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc.  This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values:  0      = Broker nothing
+#         -1      = Broker everything
+#         <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# be loaded by Nagios at startup.  Use multiple directives if you want
+# to load more than one module.  Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+#    1. Shutdown Nagios, replace the module file, restart Nagios
+#    2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+#   broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+#	n	= None - don't rotate the log
+#	h	= Hourly rotation (top of the hour)
+#	d	= Daily rotation (midnight every day)
+#	w	= Weekly rotation (midnight on Saturday evening)
+#	m	= Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be 
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1.  If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0.  If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0.  If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0.  If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1.  If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option.  In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0.  If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0.  If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!  This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed.  Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts.  Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks.  Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+#       s       = Use "smart" interleave factor calculation
+#       x       = Use an interleave factor of x, where x is a
+#                 number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed.  Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of 
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized.  A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that  a single
+# check result reaper event will be allowed to run before 
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!  
+
+check_result_path=/var/nagios/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid.  Files older than this 
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks.  Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state 
+# information when checking host and service dependencies. Normally 
+# Nagios will only use the latest hard host or service state when 
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+#  0 = Don't use soft state dependencies (default) 
+#  1 = Use soft state dependencies 
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time.  This can help balance the load on
+# the monitoring server.  
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks.  This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled.  Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off.  Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands.  All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down.  Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor.  This is useful for 
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts.  Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down.  The state 
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular interval, but it will still save retention
+# data before shutting down or restarting.  If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set 
+# program status variables based on the values saved in the
+# retention file.  If you want to use retained program status
+# information, set this value to 1.  If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file.  If you
+# If you want to use retained scheduling info, set this
+# value to 1.  If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.  
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options.  For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options.  For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files.  Setting this to 60 means
+# that each interval is one minute long (60 seconds).  Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available.  It is recommended that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios.  Nagios is critical to you - make sure you keep it in
+# good shape.  Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance 
+# with our privacy policy - see http://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates.  By default, Nagios will send information on the 
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not.  Nagios Enterprises uses
+# this data to determine the number of users running specific versions of
+# Nagios.  Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default).  Otherwise set this value to 1 to
+# enable the aggressive check option.  Read the docs for more info
+# on what aggressive host check is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will sent out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started.  Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks.  If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below).  Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed.  These commands are executed only if the
+# enable_performance_data option (above) is set to 1.  The command
+# argument is the short name of a command definition that you 
+# define in your host configuration file.  Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# enable_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files.  The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text.  A newline is automatically added after each write
+# to the performance data file.  Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below.  A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files.  The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_services option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_hosts option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios.  This option is useful
+# if you have distributed or failover monitoring setup.  In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts.  If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance.  Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT.  By default, a passive host check
+# result will put a host into a HARD state type.  This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically 
+# check for orphaned host and service checks.  Since service checks are
+# not rescheduled until the results of their previous execution 
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled.  A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks.  Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results.  If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results.  If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".  
+# Flapping occurs when a host or service changes between
+# states too frequently.  When Nagios detects that a 
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping.  Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+#         0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does.  This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+#	us		(MM-DD-YYYY HH:MM:SS)
+#	euro    	(DD-MM-YYYY HH:MM:SS)
+#	iso8601		(YYYY-MM-DD HH:MM:SS)
+#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in.  If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path 
+# to include your timezone.  Example:
+#
+#   <Directory "/usr/local/nagios/sbin/">
+#      SetEnv TZ "Australia/Brisbane"
+#      ...
+#   </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located.  If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file = {{nagios_p1_pl}}
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime.  This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more 
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc.  This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+#	$HOSTOUTPUT$
+#	$HOSTPERFDATA$
+#	$HOSTACKAUTHOR$
+#	$HOSTACKCOMMENT$
+#	$SERVICEOUTPUT$
+#	$SERVICEPERFDATA$
+#	$SERVICEACKAUTHOR$
+#	$SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files.  Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression 
+# matching takes place in the object config files.  This option
+# only has an effect if regular expression matching is enabled
+# (see above).  If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?).  If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon.  Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes.  Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+#         0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+#         0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.  Enabling this option can cause performance issues in 
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+#         0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks).  If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+#        0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks).  Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems.  Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this.  If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+#        0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file.  OR values together to log multiple
+# types of information.
+# Values: 
+#          -1 = Everything
+#          0 = Nothing
+#	   1 = Functions
+#          2 = Configuration
+#          4 = Process information
+#	   8 = Scheduled events
+#          16 = Host/service checks
+#          32 = Notifications
+#          64 = Event broker
+#          128 = External commands
+#          256 = Commands
+#          512 = Scheduled downtime
+#          1024 = Comments
+#          2048 = Macros
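+#
+# For example, to log host/service checks (16) and notifications (32),
+# OR the values together: 16 + 32 = 48, i.e. debug_level=48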
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+#         1 = More detailed
+#         2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file.  If
+# the file grows larger than this size, it will be renamed with a .old
+# extension.  If a file already exists with a .old extension it will
+# automatically be deleted.  This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2
new file mode 100644
index 0000000..d8936a0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.conf.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
+# Last Modified: 11-26-2005
+#
+# This file contains examples of entries that need
+# to be incorporated into your Apache web server
+# configuration file.  Customize the paths, etc. as
+# needed to fit your system.
+#
+
+ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
+
+<Directory "/usr/lib/nagios/cgi">
+#  SSLRequireSSL
+   Options ExecCGI
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+
+Alias /nagios "/usr/share/nagios"
+
+<Directory "/usr/share/nagios">
+#  SSLRequireSSL
+   Options None
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2
new file mode 100644
index 0000000..01e21ac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/nagios.j2
@@ -0,0 +1,146 @@
+#!/bin/sh
+# $Id$
+# Nagios	Startup script for the Nagios monitoring daemon
+#
+# chkconfig:	- 85 15
+# description:	Nagios is a service monitoring system
+# processname: nagios
+# config: /etc/nagios/nagios.cfg
+# pidfile: /var/nagios/nagios.pid
+#
+### BEGIN INIT INFO
+# Provides:		nagios
+# Required-Start:	$local_fs $syslog $network
+# Required-Stop:	$local_fs $syslog $network
+# Short-Description:    start and stop Nagios monitoring server
+# Description:		Nagios is a service monitoring system
+### END INIT INFO
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+prefix="/usr"
+exec_prefix="/usr"
+exec="/usr/sbin/nagios"
+prog="nagios"
+config="/etc/nagios/nagios.cfg"
+pidfile="{{nagios_pid_file}}"
+user="{{nagios_user}}"
+
+[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+
+lockfile=/var/lock/subsys/$prog
+
+start() {
+    [ -x $exec ] || exit 5
+    [ -f $config ] || exit 6
+    echo -n $"Starting $prog: "
+    daemon --user=$user $exec -d $config
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && touch $lockfile
+    return $retval
+}
+
+stop() {
+    echo -n $"Stopping $prog: "
+    killproc -d 10 $exec
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && rm -f $lockfile
+    return $retval
+}
+
+
+restart() {
+    stop
+    start
+}
+
+reload() {
+    echo -n $"Reloading $prog: "
+    killproc $exec -HUP
+    RETVAL=$?
+    echo
+}
+
+force_reload() {
+    restart
+}
+
+check_config() {
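+        # $nice and $corelimit are expected to come from /etc/sysconfig/nagios
+        # (sourced above, if present); they expand to nothing otherwise.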
+        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
+        RETVAL=$?
+        if [ $RETVAL -ne 0 ] ; then
+                echo -n $"Configuration validation failed"
+                failure
+                echo
+                exit 1
+
+        fi
+}
+
+
+case "$1" in
+    start)
+        status $prog && exit 0
+	check_config
+        $1
+        ;;
+    stop)
+        status $prog|| exit 0
+        $1
+        ;;
+    restart)
+	check_config
+        $1
+        ;;
+    reload)
+        status $prog || exit 7
+	check_config
+        $1
+        ;;
+    force-reload)
+	check_config
+        force_reload
+        ;;
+    status)
+        status $prog
+        ;;
+    condrestart|try-restart)
+        status $prog|| exit 0
+	check_config
+        restart
+        ;;
+    configtest)
+        echo -n  $"Checking config for $prog: "
+        check_config && success
+        echo
+	;;
+    *)
+        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
+        exit 2
+esac
+exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2
new file mode 100644
index 0000000..920bfae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/resource.cfg.j2
@@ -0,0 +1,51 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+###########################################################################
+#
+# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
+#
+# Last Modified: 09-10-2003
+#
+# You can define $USERx$ macros in this file, which can in turn be used
+# in command definitions in your host config file(s).  $USERx$ macros are
+# useful for storing sensitive information such as usernames, passwords,
+# etc.  They are also handy for specifying the path to plugins and
+# event handlers - if you decide to move the plugins or event handlers to
+# a different directory in the future, you can just update one or two
+# $USERx$ macros, instead of modifying a lot of command definitions.
+#
+# The CGIs will not attempt to read the contents of resource files, so
+# you can set restrictive permissions (600 or 660) on them.
+#
+# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
+#
+# Resource files may also be used to store configuration directives for
+# external data sources like MySQL...
+#
+###########################################################################
+
+# Sets $USER1$ to be the path to the plugins
+$USER1$={{plugins_dir}}
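+# For illustration, $USER1$ can then be referenced in command definitions,
+# e.g.: command_line  $USER1$/check_ping -H $HOSTADDRESS$ -w 3000.0,80% -c 5000.0,100% -p 5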
+
+# Sets $USER2$ to be the path to event handlers
+#$USER2$={{eventhandlers_dir}}
+
+# Store some usernames and passwords (hidden from the CGIs)
+#$USER3$=someuser
+#$USER4$=somepassword
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-log4j.xml
new file mode 100644
index 0000000..169085c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-log4j.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>log4j.appender.oozie</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.oozie.DatePattern</name>
+    <value>.yyyy-MM-dd-HH</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.File</name>
+    <value>${oozie.log.dir}/oozie.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozie.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+
+  <property>
+    <name>log4j.appender.oozieops</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.File</name>
+    <value>${oozie.log.dir}/oozie-ops.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieops.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.File</name>
+    <value>${oozie.log.dir}/oozie-instrumentation.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieinstrumentation.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.File</name>
+    <value>${oozie.log.dir}/oozie-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.oozieaudit.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.File</name>
+    <value>${oozie.log.dir}/oozie-jpa.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.Append</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.openjpa.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %5p %c{1}:%L - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.logger.openjpa</name>
+    <value>INFO, openjpa</value>
+  </property>
+  <property>
+    <name>log4j.logger.oozieops</name>
+    <value>INFO, oozieops</value>
+  </property>
+  <property>
+    <name>log4j.logger.oozieinstrumentation</name>
+    <value>ALL, oozieinstrumentation</value>
+  </property>
+  <property>
+    <name>log4j.logger.oozieaudit</name>
+    <value>ALL, oozieaudit</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.oozie</name>
+    <value>INFO, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop</name>
+    <value>WARN, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.mortbay</name>
+    <value>WARN, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.hsqldb</name>
+    <value>WARN, oozie</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.security.authentication.server</name>
+    <value>INFO, oozie</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
index bda13d6..c473435 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
@@ -16,23 +16,99 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.</comment>
-    <version>3.3.2.1.3.2.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>3.3.2.1.3.3.0</version>
+      <components>
         <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
+          <name>OOZIE_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>OOZIE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>oozie-site</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>oozie.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>oozie-client.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie-site</config-type>
+        <config-type>oozie-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/oozieSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/oozieSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/oozieSmoke.sh
new file mode 100644
index 0000000..2446544
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/oozieSmoke.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
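+# Extract the <value> of the property named $2 from the Hadoop-style XML
+# configuration file $1.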
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  su - ${smoke_test_user} -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the couner and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
+export oozie_conf_dir=$1
+export hadoop_conf_dir=$2
+export smoke_test_user=$3
+export security_enabled=$4
+export smoke_user_keytab=$5
+export kinit_path_local=$6
+
+export OOZIE_EXIT_CODE=0
+export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
+export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
+export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
+cd $OOZIE_EXAMPLES_DIR
+
+tar -zxf oozie-examples.tar.gz
+sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else 
+  kinitcmd=""
+fi
+
+su - ${smoke_test_user} -c "hadoop dfs -rmr examples"
+su - ${smoke_test_user} -c "hadoop dfs -rmr input-data"
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
+job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id"
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..97a513c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
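+# Run ooziedb.sh from /var/tmp/oozie and treat a "DB schema exists" error
+# as success, so repeated schema-create calls stay idempotent.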
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
new file mode 100644
index 0000000..e1a7869
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
@@ -0,0 +1,111 @@
+import os
+from resource_management import *
+
+def oozie(is_server=False):
+  import params
+
+  XmlConfig( "oozie-site.xml",
+    conf_dir = params.conf_dir, 
+    configurations = params.config['configurations']['oozie-site'],
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0664
+  )
+  
+  Directory( params.conf_dir,
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
+    owner = params.oozie_user
+  )
+
+  if params.log4j_props is not None:
+    PropertiesFile('oozie-log4j.properties',
+                   dir=params.conf_dir,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.oozie_user,
+                   group=params.user_group,
+    )
+  elif os.path.exists(format("{params.conf_dir}/oozie-log4j.properties")):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.oozie_user
+    )
+
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
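+    # Fetch the DBConnectionVerification jar from the Ambari server; the
+    # not_if guard skips the download when the jar is already in place.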
+    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
+    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
+     -o {check_db_connection_jar_name}'"),
+      not_if  = format("[ -f {check_db_connection_jar} ]")
+    )
+    
+  oozie_ownership( )
+  
+  if is_server:      
+    oozie_server_specific( )
+  
+def oozie_ownership():
+  import params
+  
+  File ( format("{conf_dir}/adminusers.txt"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+def oozie_server_specific():
+  import params
+  
+  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    mode = 0755,
+    recursive = True
+  )
+       
+  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
+  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
+  
+  # this is different for HDP2
+  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir}")
+  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
+    cmd3 += format(" && mkdir -p {oozie_libext_dir} && cp {jdbc_driver_jar} {oozie_libext_dir}")
+    
+  # this is different for HDP2
+  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 {hadoop_jar_location} -extjs {ext_js_path} {jar_option} {jar_path}")
+  
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  Execute( [cmd1, cmd2, cmd3],
+    not_if  = no_op_test
+  )
+  Execute( cmd4,
+    user = params.oozie_user,
+    not_if  = no_op_test
+  )
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_client.py
new file mode 100644
index 0000000..23fdc12
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_client.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_server.py
new file mode 100644
index 0000000..eca2a56
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_server.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=True)
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    oozie_service(action='start')
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_service.py
new file mode 100644
index 0000000..1d8767c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie_service.py
@@ -0,0 +1,45 @@
+from resource_management import *
+
+def oozie_service(action = 'start'): # 'start' or 'stop'
+  import params
+
+  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
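+  # Succeeds only when the Oozie pid file exists and the recorded process
+  # is alive, i.e. the server is already running.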
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
+    
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+      
+    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
+    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+      
+    if db_connection_check_command:
+      Execute( db_connection_check_command)
+                  
+    Execute( cmd1,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+      ignore_failures = True
+    ) 
+    
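+    # Upload the Oozie sharelib to HDFS unless a 'share' entry already
+    # exists under /user/oozie (checked by the awk line count below).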
+    Execute( cmd2,
+      user = params.oozie_user,       
+      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
+    )
+    
+    Execute( start_cmd,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+    )
+  elif action == 'stop':
+    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
+    Execute( stop_cmd,
+      only_if  = no_op_test
+    )
+
+  
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
new file mode 100644
index 0000000..cd3e7bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,70 @@
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+oozie_user = config['configurations']['global']['oozie_user']
+smokeuser = config['configurations']['global']['smokeuser']
+conf_dir = "/etc/oozie/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+user_group = config['configurations']['global']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+hadoop_prefix = "/usr"
+oozie_tmp_dir = "/var/tmp/oozie"
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+# for HDP2 it's "/usr/share/HDP-oozie/ext-2.2.zip"
+ext_js_path = "/usr/share/HDP-oozie/ext.zip"
+oozie_libext_dir = "/usr/lib/oozie/libext"
+lzo_enabled = config['configurations']['global']['lzo_enabled']
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
+oozie_keytab = config['configurations']['global']['oozie_keytab']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+java_share_dir = "/usr/share/java"
+
+java_home = config['hostLevelParams']['java_home']
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['global']['oozie_log_dir']
+oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_lib_dir = "/var/lib/oozie/"
+oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+
+if jdbc_driver_name == "com.mysql.jdbc.Driver":
+  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+else:
+  jdbc_driver_jar = ""
+  
+if lzo_enabled or jdbc_driver_name:
+  jar_option = "-jars"         
+else:
+  jar_option = ""
+  
+lzo_jar_suffix = "/usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar" if lzo_enabled else ""
+  
+if lzo_enabled and jdbc_driver_name:
+  jar_path = format("{lzo_jar_suffix}:{jdbc_driver_jar}")
+else:
+  jar_path = format("{lzo_jar_suffix}{jdbc_driver_jar}")
+
+#oozie-log4j.properties
+if 'oozie-log4j' in config['configurations']:
+  log4j_props = config['configurations']['oozie-log4j']
+else:
+  log4j_props = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/service_check.py
new file mode 100644
index 0000000..7dbfc87
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,47 @@
+from resource_management import *
+
+class OozieServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    # on HDP2 this file is different
+    smoke_test_file_name = 'oozieSmoke.sh'
+
+    oozie_smoke_shell_file( smoke_test_file_name)
+  
+def oozie_smoke_shell_file(file_name):
+  import params
+
+  File( format("/tmp/{file_name}"),
+    content = StaticFile(file_name),
+    mode = 0755
+  )
+  
+  if params.security_enabled:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
+  else:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
+
+  Execute( format("/tmp/{file_name}"),
+    command   = sh_cmd,
+    tries     = 3,
+    try_sleep = 5,
+    logoutput = True
+  )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServiceCheck().execute()
+  
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
+  #main()
+  


[30/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
new file mode 100644
index 0000000..87da4dd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
@@ -0,0 +1,545 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
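+    # Echo the logged pid only if a process with that pid is actually running.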
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host total memory every
+   180 secs.
+   This information doesn't change between reboots and is only collected
+   once. This information needed for heatmap showing */
+ collection_group {
+   collect_once = yes
+   time_threshold = 180
+   metric {
+    name = "mem_total"
+    title = "Memory Total"
+   }
+ }
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unneccessary  network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+udp_recv_channel {
+    port = 0
+}
+
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}

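Both generateGmondMasterConf and generateGmondSlaveConf lean on getGangliaClusterInfo (presumably defined in gangliaLib.sh, which is not fully shown in this patch) to resolve a cluster name into the <gmondClusterName, gmondMasterIP, gmondPort> triple. A minimal Python sketch of that lookup, assuming the three-column gangliaClusters.conf layout that appears later in this commit; the function name and conf path here are illustrative, not part of the committed scripts:

    def get_ganglia_cluster_info(cluster_name,
                                 conf_path="/usr/libexec/hdp/ganglia/gangliaClusters.conf"):
        # Scan the "ClusterName  GmondMasterHost  GmondPort" table, skipping
        # blank lines and comments, and return the matching triple.
        with open(conf_path) as conf:
            for line in conf:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                fields = line.split()
                if len(fields) == 3 and fields[0] == cluster_name:
                    return fields[0], fields[1], int(fields[2])
        return None  # unknown cluster; the shell helpers signal this with a non-zero status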
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrd.py
new file mode 100644
index 0000000..3fe6901
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrd.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import cgi
+import os
+import rrdtool
+import sys
+import time
+import re
+import urlparse
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+
+
+def printMetric(clusterName, hostName, metricName, rrdFile, cf, start, end,
+                resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+
+  args = [rrdFile, cf]
+
+  if start is not None:
+    args.extend(["-s", start])
+
+  if end is not None:
+    args.extend(["-e", end])
+
+  if resolution is not None:
+    args.extend(["-r", resolution])
+
+  rrdMetric = rrdtool.fetch(args)
+  # ds_name
+  sys.stdout.write(rrdMetric[1][0])
+  sys.stdout.write("\n")
+
+  sys.stdout.write(clusterName)
+  sys.stdout.write("\n")
+  sys.stdout.write(hostName)
+  sys.stdout.write("\n")
+  sys.stdout.write(metricName)
+  sys.stdout.write("\n")
+
+  # write time
+  sys.stdout.write(str(rrdMetric[0][0]))
+  sys.stdout.write("\n")
+  # write step
+  sys.stdout.write(str(rrdMetric[0][2]))
+  sys.stdout.write("\n")
+
+  if not pointInTime:
+    valueCount = 0
+    lastValue = None
+
+    for row in rrdMetric[2]:
+
+      thisValue = row[0]
+
+      if valueCount > 0 and thisValue == lastValue:
+        valueCount += 1
+      else:
+        if valueCount > 1:
+          sys.stdout.write("[~r]")
+          sys.stdout.write(str(valueCount))
+          sys.stdout.write("\n")
+
+        if thisValue is None:
+          sys.stdout.write("[~n]\n")
+        else:
+          sys.stdout.write(str(thisValue))
+          sys.stdout.write("\n")
+
+        valueCount = 1
+        lastValue = thisValue
+  else:
+    value = None
+    idx = -1
+    rows = rrdMetric[2]
+    rowsLastIdx = len(rows) * -1
+
+    while value is None and idx >= rowsLastIdx:
+      value = rows[idx][0]
+      idx -= 1
+
+    if value is not None:
+      sys.stdout.write(str(value))
+      sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+  return
+
+
+def stripList(l):
+  return [x.strip() for x in l]
+
+
+sys.stdout.write("Content-type: text/plain\n\n")
+
+# write start time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+requestMethod = os.environ['REQUEST_METHOD']
+
+if requestMethod == 'POST':
+  postData = sys.stdin.readline()
+  queryString = cgi.parse_qs(postData)
+  queryString = dict((k, v[0]) for k, v in queryString.items())
+elif requestMethod == 'GET':
+  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
+else:
+  queryString = {}  # no query parameters for other request methods
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "/var/lib/ganglia/rrds/"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+if "pt" in queryString:
+  pointInTime = True
+else:
+  pointInTime = False
+
+
+def _walk(*args, **kwargs):
+  for root, dirs, files in os.walk(*args, **kwargs):
+    for subdir in dirs:
+      qualified_dir = os.path.join(root, subdir)
+      if os.path.islink(qualified_dir):
+        for x in os.walk(qualified_dir, **kwargs):
+          yield x
+    yield (root, dirs, files)
+
+
+for cluster in clusterParts:
+  for path, dirs, files in _walk(rrdPath + cluster):
+    pathParts = path.split("/")
+    # Process only paths that contain files. If no host parameter was passed,
+    # process all host folders plus the summary info; if one was passed,
+    # process only the requested host folders.
+    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
+      for metric in metricParts:
+        fileName = metric + ".rrd"
+        fileFullPath = os.path.join(path, fileName)
+        if os.path.exists(fileFullPath):
+          # exact metric name match
+          printMetric(pathParts[-2], pathParts[-1], fileName[:-4],
+                      fileFullPath, cf, start, end, resolution,
+                      pointInTime)
+        else:
+          # treat the metric name as a regex
+          metricRegex = metric + r'\.rrd$'
+          p = re.compile(metricRegex)
+          matchedFiles = filter(p.match, files)
+          for matchedFile in matchedFiles:
+            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
+                        os.path.join(path, matchedFile), cf, start, end,
+                        resolution, pointInTime)
+
+sys.stdout.write("[~EOF]\n")
+# write end time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+sys.stdout.flush()

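For orientation, rrd.py answers each request with: the ds name, cluster, host and metric names, the start time and step, and then run-length-encoded samples in which a plain value line opens a run, a following "[~r]N" line means that value occurred N times in total, "[~n]" marks a null sample, "[~EOM]" closes a metric, and "[~EOF]" closes the whole response. A hypothetical client-side decoder for one metric's value lines (a sketch, not part of this commit):

    def decode_values(lines):
        # Expand rrd.py's run-length encoding back into a flat sample list.
        values = []
        for line in lines:
            if line == "[~EOM]":
                break  # end of this metric's samples
            if line.startswith("[~r]"):
                repeat = int(line[4:])
                # The run's first occurrence is already stored, so add repeat - 1 copies.
                values.extend([values[-1]] * (repeat - 1))
            elif line == "[~n]":
                values.append(None)  # null sample
            else:
                values.append(float(line))
        return values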
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh
new file mode 100644
index 0000000..8b7c257
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}

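getRrdcachedRunningPid layers a liveness check on top of the logged PID: the recorded PID only counts if ps can still see the process. A rough Python equivalent, using os.kill(pid, 0) instead of parsing ps output; the pid-file path is illustrative, since GANGLIA_RUNTIME_DIR is configurable:

    import os

    def get_rrdcached_running_pid(pid_file="/var/run/ganglia/hdp/rrdcached.pid"):
        # Read the logged PID, then verify a process with that PID still exists.
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return None  # no pid file, or unreadable contents
        try:
            os.kill(pid, 0)  # signal 0 probes existence without disturbing the process
        except OSError:
            return None  # stale pid file
        return pid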
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh
new file mode 100644
index 0000000..5145b9c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh
@@ -0,0 +1,141 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+  -o <owner>              Owner of the generated configuration files.
+  -g <group>              Group of the generated configuration files.
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+owner='root';
+group='root';
+
+while getopts ":c:mto:g:" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    o)
+      owner=${OPTARG};
+      ;;
+    g)
+      group=${OPTARG};
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod a+w ${GANGLIA_RUNTIME_DIR};
+chown ${owner}:${group} ${GANGLIA_CONF_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi

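Putting the options together: setupGanglia.sh -t -o root -g hadoop generates gmetad configuration, setupGanglia.sh -c HDPSlaves -m generates master gmond configuration for the HDPSlaves cluster, and setupGanglia.sh -c HDPSlaves generates the slave variant; owner and group fall back to root:root when -o/-g are omitted. (The hadoop group in these examples is illustrative.)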
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh
new file mode 100644
index 0000000..ab5102d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        for i in `seq 0 5`; do
+          gmetadRunningPid=`getGmetadRunningPid`;
+          if [ -n "${gmetadRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi

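startGmetad.sh, startGmond.sh and startRrdcached.sh all share the same launch pattern: start the daemon, then poll for a live PID for up to roughly five seconds before declaring success or failure. The retry loop, reduced to a Python sketch in which get_pid stands in for the getGmetadRunningPid-style helpers:

    import time

    def wait_for_pid(get_pid, attempts=6, delay=1):
        # Poll until the daemon's PID shows up or the grace period runs out.
        for _ in range(attempts):
            pid = get_pid()
            if pid is not None:
                return pid
            time.sleep(delay)
        return None  # the daemon never came up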
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh
new file mode 100644
index 0000000..239b62e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+
+        for i in `seq 0 5`; do
+          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+          if [ -n "${gmondRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh
new file mode 100644
index 0000000..e79472b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    #changed because problem puppet had with nobody user
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b ${RRDCACHED_BASE_DIR} -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
+    # this, but it sometimes fails to take effect due to a lack of permissions,
+    # so perform the operation explicitly to be extra sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    for i in `seq 0 5`; do
+      rrdcachedRunningPid=`getRrdcachedRunningPid`;
+      if [ -n "${rrdcachedRunningPid}" ]
+        then
+          break;
+      fi
+      sleep 1;
+    done
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh
new file mode 100644
index 0000000..2764e0e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh
new file mode 100644
index 0000000..1af3eb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -KILL ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh
new file mode 100644
index 0000000..0a0d8d8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh
new file mode 100644
index 0000000..b27f7a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py
new file mode 100644
index 0000000..69fde27
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py
@@ -0,0 +1,97 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import os
+
+
+def groups_and_users():
+  import params  # currently a no-op placeholder
+
+def config():
+  import params
+
+  shell_cmds_dir = params.ganglia_shell_cmds_dir
+  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
+                 'gmondLib.sh', 'rrdcachedLib.sh',
+                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
+                 'startRrdcached.sh', 'stopGmetad.sh',
+                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
+  Directory(shell_cmds_dir,
+            owner="root",
+            group="root",
+            recursive=True
+  )
+  init_file("gmetad")
+  init_file("gmond")
+  for sh_file in shell_files:
+    shell_file(sh_file)
+  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
+    ganglia_TemplateConfig(conf_file)
+
+
+def init_file(name):
+  import params
+
+  File("/etc/init.d/hdp-" + name,
+       content=StaticFile(name + ".init"),
+       mode=0755
+  )
+
+
+def shell_file(name):
+  import params
+
+  File(params.ganglia_shell_cmds_dir + os.sep + name,
+       content=StaticFile(name),
+       mode=0755
+  )
+
+
+def ganglia_TemplateConfig(name, mode=0755, tag=None):
+  import params
+
+  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
+                 owner="root",
+                 group="root",
+                 template_tag=tag,
+                 mode=mode
+  )
+
+
+def generate_daemon(ganglia_service,
+                    name=None,
+                    role=None,
+                    owner=None,
+                    group=None):
+  import params
+
+  cmd = ""
+  if ganglia_service == "gmond":
+    if role == "server":
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
+    else:
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
+  elif ganglia_service == "gmetad":
+    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
+  else:
+    raise Fail("Unexpected ganglia service")
+  Execute(format(cmd),
+          path=[params.ganglia_shell_cmds_dir, "/usr/sbin", "/sbin",
+                "/usr/local/bin", "/bin", "/usr/bin"]
+  )

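For concreteness: with the ganglia_shell_cmds_dir value set in params.py, a monitor-role call such as generate_daemon("gmond", name="HDPSlaves", role="monitor", owner="root", group="hadoop") expands to /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -o root -g hadoop; role="server" adds the -m (master) flag, and ganglia_service="gmetad" produces the -t form. The hadoop group here is illustrative, as the real value comes from params.user_group.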
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py
new file mode 100644
index 0000000..4abdb9b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py
@@ -0,0 +1,139 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_monitor_service
+
+
+class GangliaMonitor(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.configure(env)
+
+  def start(self, env):
+    ganglia_monitor_service.monitor("start")
+
+  def stop(self, env):
+    ganglia_monitor_service.monitor("stop")
+
+
+  def status(self, env):
+    import status_params
+    pid_file_name = 'gmond.pid'
+    pid_file_count = 0
+    pid_dir = status_params.pid_dir
+    # Recursively check all existing gmond pid files
+    for cur_dir, subdirs, files in os.walk(pid_dir):
+      for file_name in files:
+        if file_name == pid_file_name:
+          pid_file = os.path.join(cur_dir, file_name)
+          check_process_status(pid_file)
+          pid_file_count += 1
+    if pid_file_count == 0: # no pid file is present at all
+      raise ComponentIsNotRunning()
+
+
+  def configure(self, env):
+    import params
+
+    ganglia.groups_and_users()
+
+    Directory(params.ganglia_conf_dir,
+              owner="root",
+              group=params.user_group,
+              recursive=True
+    )
+
+    ganglia.config()
+
+    if params.is_namenode_master:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jtnode_master:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_rmnode_master:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_master:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    # A pure slave hosts none of the master components above.
+    pure_slave = params.is_slave and not (params.is_namenode_master or
+                                          params.is_jtnode_master or
+                                          params.is_rmnode_master or
+                                          params.is_hsnode_master or
+                                          params.is_hbase_master)
+    if pure_slave:
+      generate_daemon("gmond",
+                      name = "HDPSlaves",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    Directory(path.join(params.ganglia_dir, "conf.d"),
+              owner="root",
+              group=params.user_group
+    )
+
+    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "gmond.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+if __name__ == "__main__":
+  GangliaMonitor().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py
new file mode 100644
index 0000000..d86d894
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def monitor(action=None):  # action is 'start' or 'stop'
+  if action == "start":
+    Execute("chkconfig gmond off",
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    )
+  Execute(
+    format(
+      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
+    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )

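Note that the start path first runs chkconfig gmond off, presumably so the distribution's own gmond init script stays disabled and only the hdp-gmond wrapper installed by ganglia.py controls the daemon; both start and stop then append the service output and a ps listing to /tmp/gmond.log.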
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py
new file mode 100644
index 0000000..863f092
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py
@@ -0,0 +1,197 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_server_service
+
+
+class GangliaServer(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.configure(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/gmetad.pid")
+    # Check the single gmetad pid file
+    check_process_status(pid_file)
+
+  def configure(self, env):
+    import params
+
+    ganglia.groups_and_users()
+    ganglia.config()
+
+    if params.has_namenodes:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_jobtracker:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_masters:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_resourcemanager:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_nodemanager:
+      generate_daemon("gmond",
+                      name = "HDPNodeManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_historyserver:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_slaves:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_journalnode:
+      generate_daemon("gmond",
+                      name = "HDPJournalNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    generate_daemon("gmetad",
+                    name = "gmetad",
+                    role = "server",
+                    owner = "root",
+                    group = params.user_group)
+
+    change_permission()
+    server_files()
+    File(path.join(params.ganglia_dir, "gmetad.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+def change_permission():
+  import params
+
+  Directory('/var/lib/ganglia/dwoo',
+            mode=0777,
+            owner=params.gmetad_user,
+            recursive=True
+  )
+
+
+def server_files():
+  import params
+
+  rrd_py_path = params.rrd_py_path
+  Directory(rrd_py_path,
+            recursive=True
+  )
+  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
+  File(rrd_py_file_path,
+       content=StaticFile("rrd.py"),
+       mode=0755
+  )
+  rrd_file_owner = params.gmetad_user
+  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
+    Directory(params.rrdcached_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              mode=0755,
+              recursive=True
+    )
+    Directory(params.rrdcached_default_base_dir,
+              action = "delete"
+    )
+    Link(params.rrdcached_default_base_dir,
+         to=params.rrdcached_base_dir
+    )
+  elif rrd_file_owner != 'nobody':
+    Directory(params.rrdcached_default_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              recursive=True
+    )
+
+
+if __name__ == "__main__":
+  GangliaServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py
new file mode 100644
index 0000000..b93e3f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def server(action=None):  # action is 'start' or 'stop'
+  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  Execute(format(command),
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py
new file mode 100644
index 0000000..601601e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+
+config = Script.get_config()
+
+user_group = config['configurations']['global']["user_group"]
+ganglia_conf_dir = "/etc/ganglia/hdp"
+ganglia_dir = "/etc/ganglia"
+ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
+
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+
+webserver_group = "apache"
+rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
+rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
+
+ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
+
+hostname = config["hostname"]
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+rm_host = default("/clusterHostInfo/rm_host", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+# datanodes are marked as slave_hosts
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
+flume_hosts = default("/clusterHostInfo/flume_hosts", [])
+jn_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+is_tasktracker = hostname in tt_hosts
+is_nodemanager = hostname in nm_hosts
+is_hbase_rs = hostname in hbase_rs_hosts
+is_flume = hostname in flume_hosts
+is_jn_host = hostname in jn_hosts
+
+has_namenodes = len(namenode_host) > 0
+has_jobtracker = len(jtnode_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_historyserver = len(hs_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_slaves = len(slave_hosts) > 0
+has_tasktracker = len(tt_hosts) > 0
+has_nodemanager = len(nm_hosts) > 0
+has_hbase_rs = len(hbase_rs_hosts) > 0
+has_flume = len(flume_hosts) > 0
+has_journalnode = len(jn_hosts) > 0
+
+if System.get_instance().os_family == "suse":
+  rrd_py_path = '/srv/www/cgi-bin'
+else:
+  rrd_py_path = '/var/www/cgi-bin'

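The is_*/has_* flags above are plain membership and non-emptiness tests over the clusterHostInfo lists, evaluated per host. A toy illustration with invented hostnames:

    # Invented two-node example, not taken from the commit.
    namenode_host = ["c6401.ambari.apache.org"]
    hostname = "c6402.ambari.apache.org"

    is_namenode_master = hostname in namenode_host   # False on this host
    has_namenodes = len(namenode_host) > 0           # True anywhere in the cluster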
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py
new file mode 100644
index 0000000..3ccad2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2
new file mode 100644
index 0000000..f3bb355
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2
@@ -0,0 +1,35 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+
+    HDPJournalNode          {{ganglia_server_host}}   8654
+    HDPFlumeServer          {{ganglia_server_host}}   8655
+    HDPHBaseRegionServer    {{ganglia_server_host}}   8656
+    HDPNodeManager          {{ganglia_server_host}}   8657
+    HDPTaskTracker          {{ganglia_server_host}}   8658
+    HDPDataNode             {{ganglia_server_host}}   8659
+    HDPSlaves               {{ganglia_server_host}}   8660
+    HDPNameNode             {{ganglia_server_host}}   8661
+    HDPJobTracker           {{ganglia_server_host}}   8662
+    HDPHBaseMaster          {{ganglia_server_host}}   8663
+    HDPResourceManager      {{ganglia_server_host}}   8664
+    HDPHistoryServer        {{ganglia_server_host}}   8666
+

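Each row above binds one logical component cluster to the gmond port its metrics are served on (8665 is skipped in this table). A sketch of reading that mapping from Python, mirroring the awk filtering done in gangliaLib.sh below:

    # Sketch: parse gangliaClusters.conf into {cluster: (host, port)},
    # skipping comment and blank lines.
    def read_ganglia_clusters(path="gangliaClusters.conf"):
        clusters = {}
        with open(path) as conf:
            for line in conf:
                fields = line.split()
                if len(fields) == 3 and not fields[0].startswith("#"):
                    name, host, port = fields
                    clusters[name] = (host, int(port))
        return clusters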
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2
new file mode 100644
index 0000000..1ead550
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER={{gmetad_user}};
+GMOND_USER={{gmond_user}};
+WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2
new file mode 100644
index 0000000..4b5bdd1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR={{ganglia_conf_dir}};
+GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
+RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}

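getConfiguredGangliaClusterNames() reports the per-cluster subdirectories under the Ganglia conf dir. In the Python direction this patch is taking, the same lookup could be written as the following sketch:

    # Sketch: Python equivalent of getConfiguredGangliaClusterNames(),
    # listing the immediate subdirectories of the Ganglia conf dir.
    import os

    def configured_ganglia_cluster_names(conf_dir):
        if not os.path.isdir(conf_dir):
            return []
        return [name for name in os.listdir(conf_dir)
                if os.path.isdir(os.path.join(conf_dir, name))]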
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml
new file mode 100644
index 0000000..2258d73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hbase.root.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hbase.security.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hbase.log.dir</name>
+    <value>.</value>
+  </property>
+  <property>
+    <name>hbase.log.file</name>
+    <value>hbase.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hbase.root.logger}</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hbase.log.dir}/${hbase.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>hbase.log.maxfilesize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>hbase.log.maxbackupindex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.File</name>
+    <value>${hbase.log.dir}/${hbase.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxFileSize</name>
+    <value>${hbase.log.maxfilesize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxBackupIndex</name>
+    <value>${hbase.log.maxbackupindex}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>hbase.security.log.file</name>
+    <value>SecurityAuth.audit</value>
+  </property>
+  <property>
+    <name>hbase.security.log.maxfilesize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>hbase.security.log.maxbackupindex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.File</name>
+    <value>${hbase.log.dir}/${hbase.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxFileSize</name>
+    <value>${hbase.security.log.maxfilesize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxBackupIndex</name>
+    <value>${hbase.security.log.maxbackupindex}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.category.SecurityLogger</name>
+    <value>${hbase.security.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.SecurityLogger</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.NullAppender</name>
+    <value>org.apache.log4j.varia.NullAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p [%t] %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.zookeeper</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hbase</name>
+    <value>DEBUG</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher</name>
+    <value>INFO</value>
+  </property>
+
+
+</configuration>

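hbase-log4j.xml stores each log4j key as its own <property>, so the config-type arrives in the command JSON as a flat dict. One plausible way to serialize it back into a log4j.properties file (this hunk does not show how the stack actually does it):

    # Sketch: render configurations['hbase-log4j'] as log4j.properties text.
    def render_log4j(props):
        return "\n".join("%s=%s" % (k, v) for k, v in sorted(props.items())) + "\n"

    # e.g. render_log4j({"log4j.threshold": "ALL",
    #                    "hbase.root.logger": "INFO,console"})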
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
index afe527d..c16b160 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
@@ -16,29 +16,79 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.96.1.2.0.6.1</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.96.0.2.1.1</version>
+      <components>
         <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
+          <name>HBASE_MASTER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
+          <name>HBASE_REGIONSERVER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
 
         <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HBASE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hbase-site</config-type>
-      <config-type>hbase-policy</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-log4j</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
 </metainfo>

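The rewritten metainfo.xml moves to schemaVersion 2.0: components now nest under <services>/<service>, and each carries a PYTHON <commandScript>. A hedged ElementTree sketch for listing what a 2.0 metainfo declares:

    # Sketch: extract (component, category, script) triples from a
    # schema-2.0 metainfo.xml such as the one above.
    import xml.etree.ElementTree as ET

    def list_components(metainfo_path):
        root = ET.parse(metainfo_path).getroot()
        return [(comp.findtext("name"),
                 comp.findtext("category"),
                 comp.findtext("commandScript/script"))
                for comp in root.iter("component")]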
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..39fe6e5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q $data /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

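hbaseSmokeVerify.sh scans the ambarismoketest table and succeeds only if both the expected value and '1 row(s)' appear in the output. The same check rendered in Python, illustrative only and not part of the patch:

    # Sketch: Python equivalent of hbaseSmokeVerify.sh.
    import subprocess
    import sys

    def verify(conf_dir, data):
        proc = subprocess.Popen(["hbase", "--config", conf_dir, "shell"],
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        out = proc.communicate("scan 'ambarismoketest'\n")[0]
        print(out)
        return data in out and "1 row(s)" in out

    if __name__ == "__main__":
        sys.exit(0 if verify(sys.argv[1], sys.argv[2]) else 1)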
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""


http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json b/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json
deleted file mode 100644
index 9520b02..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/configs/secured.json
+++ /dev/null
@@ -1,549 +0,0 @@
-{
-    "roleCommand": "START", 
-    "clusterName": "cl1", 
-    "hostname": "c6402.ambari.apache.org", 
-    "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "ambari_db_rca_password": "mapred", 
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-1.3.4\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0\"}]", 
-        "package_list": "[{\"type\":\"rpm\",\"name\":\"hive\"},{\"type\":\"rpm\",\"name\":\"mysql-connector-java\"},{\"type\":\"rpm\",\"name\":\"mysql\"},{\"type\":\"rpm\",\"name\":\"mysql-server\"}]", 
-        "stack_version": "1.3.4", 
-        "stack_name": "HDP", 
-        "db_name": "ambari", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "jdk_name": "jdk-7u45-linux-x64.tar.gz", 
-        "ambari_db_rca_username": "mapred", 
-        "java_home": "/usr/jdk64/jdk1.7.0_45", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
-    }, 
-    "commandType": "EXECUTION_COMMAND", 
-    "roleParams": {}, 
-    "serviceName": "HIVE", 
-    "role": "MYSQL_SERVER", 
-    "commandParams": {
-        "command_timeout": "600", 
-        "service_package_folder": "HIVE",
-        "script_type": "PYTHON", 
-        "schema_version": "2.0", 
-        "script": "scripts/mysql_server.py",
-        "excluded_hosts": "host1"
-    }, 
-    "taskId": 117, 
-    "public_hostname": "c6402.ambari.apache.org", 
-    "configurations": {
-        "mapred-site": {
-            "ambari.mapred.child.java.opts.memory": "768", 
-            "mapred.job.reduce.input.buffer.percent": "0.0", 
-            "mapred.job.map.memory.mb": "1536", 
-            "mapred.output.compression.type": "BLOCK", 
-            "mapred.jobtracker.maxtasks.per.job": "-1", 
-            "mapreduce.jobtracker.keytab.file": "/etc/security/keytabs/jt.service.keytab", 
-            "mapred.map.output.compression.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
-            "mapred.child.root.logger": "INFO,TLA", 
-            "mapred.tasktracker.tasks.sleeptime-before-sigkill": "250", 
-            "io.sort.spill.percent": "0.9", 
-            "mapred.reduce.parallel.copies": "30", 
-            "mapred.userlog.retain.hours": "24", 
-            "mapred.reduce.tasks.speculative.execution": "false", 
-            "mapred.healthChecker.interval": "135000", 
-            "io.sort.mb": "200", 
-            "mapreduce.jobtracker.kerberos.principal": "jt/_HOST@EXAMPLE.COM", 
-            "mapred.jobtracker.blacklist.fault-timeout-window": "180", 
-            "mapreduce.cluster.administrators": " hadoop", 
-            "mapred.job.shuffle.input.buffer.percent": "0.7", 
-            "mapred.job.tracker.history.completed.location": "/mapred/history/done", 
-            "io.sort.record.percent": ".2", 
-            "mapred.cluster.max.reduce.memory.mb": "4096", 
-            "mapred.job.reuse.jvm.num.tasks": "1", 
-            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
-            "mapred.job.tracker.http.address": "c6402.ambari.apache.org:50030", 
-            "mapred.job.tracker.persist.jobstatus.hours": "1", 
-            "mapred.healthChecker.script.path": "/etc/hadoop/conf/health_check", 
-            "mapreduce.jobtracker.staging.root.dir": "/user", 
-            "mapred.job.shuffle.merge.percent": "0.66", 
-            "mapred.cluster.reduce.memory.mb": "2048", 
-            "mapred.job.tracker.persist.jobstatus.dir": "/mapred/jobstatus", 
-            "mapreduce.tasktracker.group": "hadoop", 
-            "mapred.tasktracker.map.tasks.maximum": "4", 
-            "mapred.child.java.opts": "-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true", 
-            "mapreduce.jobhistory.keytab.file": "/etc/security/keytabs/jt.service.keytab", 
-            "mapred.jobtracker.retirejob.check": "10000", 
-            "mapred.job.tracker": "c6402.ambari.apache.org:50300", 
-            "mapreduce.history.server.embedded": "false", 
-            "io.sort.factor": "100", 
-            "hadoop.job.history.user.location": "none", 
-            "mapreduce.reduce.input.limit": "10737418240", 
-            "mapred.reduce.slowstart.completed.maps": "0.05", 
-            "mapred.cluster.max.map.memory.mb": "6144", 
-            "mapreduce.tasktracker.keytab.file": "/etc/security/keytabs/tt.service.keytab", 
-            "mapred.jobtracker.taskScheduler": "org.apache.hadoop.mapred.CapacityTaskScheduler", 
-            "mapred.max.tracker.blacklists": "16", 
-            "mapreduce.tasktracker.kerberos.principal": "tt/_HOST@EXAMPLE.COM", 
-            "mapred.local.dir": "/hadoop/mapred", 
-            "mapreduce.history.server.http.address": "c6402.ambari.apache.org:51111", 
-            "mapred.jobtracker.restart.recover": "false", 
-            "mapred.jobtracker.blacklist.fault-bucket-width": "15", 
-            "mapred.jobtracker.retirejob.interval": "21600000", 
-            "tasktracker.http.threads": "50", 
-            "mapred.job.tracker.persist.jobstatus.active": "false", 
-            "mapred.system.dir": "/mapred/system", 
-            "mapred.tasktracker.reduce.tasks.maximum": "2", 
-            "mapred.cluster.map.memory.mb": "1536", 
-            "mapred.hosts.exclude": "/etc/hadoop/conf/mapred.exclude", 
-            "mapred.queue.names": "default", 
-            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
-            "mapreduce.fileoutputcommitter.marksuccessfuljobs": "false", 
-            "mapred.job.reduce.memory.mb": "2048", 
-            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
-            "mapred.healthChecker.script.timeout": "60000", 
-            "jetty.connector": "org.mortbay.jetty.nio.SelectChannelConnector", 
-            "mapreduce.jobtracker.split.metainfo.maxsize": "50000000", 
-            "mapred.job.tracker.handler.count": "50", 
-            "mapred.inmem.merge.threshold": "1000", 
-            "mapred.hosts": "/etc/hadoop/conf/mapred.include", 
-            "mapred.task.tracker.task-controller": "org.apache.hadoop.mapred.LinuxTaskController", 
-            "mapred.jobtracker.completeuserjobs.maximum": "0", 
-            "mapred.task.timeout": "600000", 
-            "mapreduce.jobhistory.kerberos.principal": "jt/_HOST@EXAMPLE.COM", 
-            "mapred.map.tasks.speculative.execution": "false"
-        }, 
-        "oozie-site": {
-            "oozie.service.PurgeService.purge.interval": "3600", 
-            "oozie.service.CallableQueueService.queue.size": "1000", 
-            "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd", 
-            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", 
-            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ", 
-            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", 
-            "local.realm": "EXAMPLE.COM", 
-            "use.system.libpath.for.mapreduce.and.pig.jobs": "false", 
-            "oozie.service.HadoopAccessorService.kerberos.enabled": "true", 
-            "oozie.service.JPAService.create.db.schema": "false", 
-            "oozie.authentication.kerberos.name.rules": "RULE:[2:$1@$0](jt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](tt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nDEFAULT", 
-            "oozie.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "oozie.service.ActionService.executor.ext.classes": "org.apache.oozie.action.email.EmailActionExecutor,\norg.apache.oozie.action.hadoop.HiveActionExecutor,\norg.apache.oozie.action.hadoop.ShellActionExecutor,\norg.apache.oozie.action.hadoop.SqoopActionExecutor,\norg.apache.oozie.action.hadoop.DistcpActionExecutor", 
-            "oozie.service.HadoopAccessorService.kerberos.principal": "oozie/c6402.ambari.apache.org@EXAMPLE.COM", 
-            "oozie.service.AuthorizationService.authorization.enabled": "true", 
-            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie", 
-            "oozie.service.JPAService.jdbc.password": "q", 
-            "oozie.service.coord.normal.default.timeout": "120", 
-            "oozie.service.JPAService.pool.max.active.conn": "10", 
-            "oozie.service.PurgeService.older.than": "30", 
-            "oozie.db.schema.name": "oozie", 
-            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", 
-            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ", 
-            "oozie.service.CallableQueueService.callable.concurrency": "3", 
-            "oozie.service.JPAService.jdbc.username": "oozie", 
-            "oozie.service.CallableQueueService.threads": "10", 
-            "oozie.systemmode": "NORMAL", 
-            "oozie.service.HadoopAccessorService.keytab.file": "/etc/security/keytabs/oozie.service.keytab", 
-            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib", 
-            "oozie.authentication.type": "kerberos", 
-            "oozie.authentication.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM", 
-            "oozie.system.id": "oozie-${user.name}"
-        }, 
-        "webhcat-site": {
-            "templeton.pig.path": "pig.tar.gz/pig/bin/pig", 
-            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@EXAMPLE.COM", 
-            "templeton.override.enabled": "false", 
-            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", 
-            "templeton.kerberos.secret": "secret", 
-            "templeton.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM", 
-            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181", 
-            "templeton.exec.timeout": "60000", 
-            "templeton.storage.class": "org.apache.hcatalog.templeton.tool.ZooKeeperStorage", 
-            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz", 
-            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar", 
-            "templeton.port": "50111", 
-            "templeton.hadoop.conf.dir": "/etc/hadoop/conf", 
-            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar", 
-            "templeton.hadoop": "/usr/bin/hadoop", 
-            "templeton.hive.path": "hive.tar.gz/hive/bin/hive", 
-            "templeton.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "templeton.hcat": "/usr/bin/hcat", 
-            "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz"
-        }, 
-        "global": {
-            "tasktracker_task_controller": "org.apache.hadoop.mapred.LinuxTaskController", 
-            "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab", 
-            "hadoop_http_principal_name": "HTTP/_HOST", 
-            "kinit_path_local": "/usr/bin", 
-            "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab", 
-            "hbase_regionserver_heapsize": "1024m", 
-            "datanode_primary_name": "dn", 
-            "namenode_principal_name": "nn/_HOST", 
-            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_principal_name": "nagios/c6402.ambari.apache.org@EXAMPLE.COM", 
-            "dfs_datanode_http_address": "1022", 
-            "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
-            "jobtracker_primary_name": "jt", 
-            "hbase_pid_dir": "/var/run/hbase", 
-            "namenode_opt_maxnewsize": "200m", 
-            "syncLimit": "5", 
-            "clientPort": "2181", 
-            "oozie_jdbc_driver": "org.apache.derby.jdbc.EmbeddedDriver", 
-            "hive_metastore_primary_name": "hive", 
-            "hbase_master_keytab": "/etc/security/keytabs/hbase.service.keytab", 
-            "nagios_primary_name": "nagios", 
-            "jobtracker_principal_name": "jt/_HOST", 
-            "hive_database": "New MySQL Database", 
-            "hcat_pid_dir": "/etc/run/webhcat", 
-            "oozie_derby_database": "Derby", 
-            "snappy_enabled": "true", 
-            "oozie_pid_dir": "/var/run/oozie", 
-            "datanode_principal_name": "dn/_HOST", 
-            "hive_metastore_keytab": "/etc/security/keytabs/hive.service.keytab", 
-            "nagios_group": "nagios", 
-            "hcat_user": "hcat", 
-            "hadoop_heapsize": "1024", 
-            "hbase_regionserver_primary_name": "hbase", 
-            "zk_user": "zookeeper", 
-            "rrdcached_base_dir": "/var/lib/ganglia/rrds", 
-            "keytab_path": "/etc/security/keytabs", 
-            "hive_pid_dir": "/var/run/hive", 
-            "webhcat_server": "c6402.ambari.apache.org", 
-            "zk_data_dir": "/hadoop/zookeeper", 
-            "hcat_log_dir": "/var/log/webhcat", 
-            "oozie_hostname": "c6402.ambari.apache.org", 
-            "tasktracker_principal_name": "tt/_HOST", 
-            "jobtracker_keytab": "/etc/security/keytabs/jt.service.keytab", 
-            "tasktracker_keytab": "/etc/security/keytabs/tt.service.keytab", 
-            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
-            "namenode_heapsize": "1024m", 
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
-            "kerberos_domain": "EXAMPLE.COM", 
-            "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
-            "nagios_server": "c6402.ambari.apache.org", 
-            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
-            "lzo_enabled": "true", 
-            "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
-            "hive_jdbc_driver": "com.mysql.jdbc.Driver", 
-            "dfs_datanode_address": "1019", 
-            "namenode_opt_newsize": "200m", 
-            "initLimit": "10", 
-            "hive_database_type": "mysql", 
-            "zk_pid_dir": "/var/run/zookeeper", 
-            "namenode_primary_name": "nn", 
-            "tickTime": "2000", 
-            "hive_metastore_principal_name": "hive/_HOST", 
-            "datanode_keytab": "/etc/security/keytabs/dn.service.keytab", 
-            "zk_log_dir": "/var/log/zookeeper", 
-            "oozie_http_principal_name": "HTTP/c6402.ambari.apache.org", 
-            "tasktracker_primary_name": "tt", 
-            "hadoop_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "gmetad_user": "nobody", 
-            "oozie_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hive_metastore": "c6402.ambari.apache.org", 
-            "nagios_user": "nagios", 
-            "security_enabled": "true", 
-            "proxyuser_group": "users", 
-            "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/", 
-            "hbase_primary_name": "hbase", 
-            "oozie_http_primary_name": "HTTP", 
-            "dtnode_heapsize": "1024m", 
-            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM", 
-            "oozie_log_dir": "/var/log/oozie", 
-            "webhcat_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "oozie_user": "oozie", 
-            "oozie_data_dir": "/hadoop/oozie/data", 
-            "oozie_primary_name": "oozie", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "zookeeper_primary_name": "zookeeper", 
-            "hbase_master_principal_name": "hbase/_HOST", 
-            "jtnode_heapsize": "1024m", 
-            "yarn_user": "yarn", 
-            "gmond_user": "nobody", 
-            "nagios_web_login": "nagiosadmin", 
-            "nagios_contact": "q@q.q", 
-            "snamenode_primary_name": "nn", 
-            "hdfs_user": "hdfs", 
-            "oozie_database_type": "derby", 
-            "webhcat_user": "hcat", 
-            "hive_hostname": "c6402.ambari.apache.org", 
-            "hbase_regionserver_principal_name": "hbase/_HOST", 
-            "hive_log_dir": "/var/log/hive", 
-            "smokeuser_principal_name": "ambari-qa", 
-            "mapred_user": "mapred", 
-            "smokeuser_primary_name": "ambari-qa", 
-            "jtnode_opt_maxnewsize": "200m", 
-            "hbase_master_primary_name": "hbase", 
-            "oozie_servername": "c6402.ambari.apache.org", 
-            "hdfs_primary_name": "hdfs", 
-            "hive_ambari_database": "MySQL", 
-            "rca_enabled": "true", 
-            "hadoop_http_primary_name": "HTTP", 
-            "webHCat_http_principal_name": "HTTP/c6402.ambari.apache.org", 
-            "mysql_connector_url": "${download_url}/mysql-connector-java-5.1.18.zip", 
-            "hive_metastore_port": "9083", 
-            "hbase_user": "hbase", 
-            "snamenode_principal_name": "nn/_HOST", 
-            "oozie_database": "New Derby Database", 
-            "hbase_log_dir": "/var/log/hbase", 
-            "user_group": "hadoop", 
-            "hive_user": "hive", 
-            "webHCat_http_primary_name": "HTTP", 
-            "nagios_web_password": "q", 
-            "smokeuser": "ambari-qa", 
-            "ganglia_conf_dir": "/etc/ganglia/hdp", 
-            "hbase_master_heapsize": "1024m", 
-            "kerberos_install_type": "MANUALLY_SET_KERBEROS", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "hive_aux_jars_path": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar", 
-            "jtnode_opt_newsize": "200m", 
-            "hbase_regionserver_keytab": "/etc/security/keytabs/hbase.service.keytab", 
-            "hbase_principal_name": "hbase", 
-            "hdfs_principal_name": "hdfs"
-        }, 
-        "hdfs-site": {
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
-            "ipc.server.max.response.size": "5242880", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.support.append": "true", 
-            "dfs.cluster.administrators": " hdfs", 
-            "ambari.dfs.datanode.http.port": "1022", 
-            "dfs.block.size": "134217728", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.hosts": "/etc/hadoop/conf/dfs.include", 
-            "dfs.datanode.du.reserved": "1073741824", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "100", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.socket.write.timeout": "0", 
-            "ipc.server.read.threadpool.size": "5", 
-            "dfs.balance.bandwidthPerSec": "6250000", 
-            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.permissions.supergroup": "hdfs", 
-            "dfs.secondary.http.address": "c6402.ambari.apache.org:50090", 
-            "ambari.dfs.datanode.port": "1019", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.name.dir": "/hadoop/hdfs/namenode", 
-            "dfs.access.time.precision": "0", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
-            "dfs.https.address": "c6401.ambari.apache.org:50470", 
-            "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}", 
-            "dfs.data.dir": "/hadoop/hdfs/data", 
-            "dfs.secondary.https.port": "50490", 
-            "dfs.permissions": "true", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.block.local-path-access.user": "hbase", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.web.ugi": "gopher,gopher", 
-            "dfs.datanode.du.pct": "0.85f", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.http.address": "c6401.ambari.apache.org:50070", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.https.port": "50070", 
-            "dfs.replication.max": "50", 
-            "dfs.datanode.max.xcievers": "4096", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.safemode.threshold.pct": "1.0f", 
-            "dfs.umaskmode": "077"
-        }, 
-        "hbase-site": {
-            "hbase.client.keyvalue.maxsize": "10485760", 
-            "hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab", 
-            "hbase.hstore.compactionThreshold": "3", 
-            "hbase.zookeeper.property.clientPort": "2181", 
-            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
-            "hbase.regionserver.handler.count": "60", 
-            "dfs.client.read.shortcircuit": "true", 
-            "hbase.bulkload.staging.dir": "/apps/hbase/staging", 
-            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
-            "hbase.master.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", 
-            "hbase.hregion.memstore.block.multiplier": "2", 
-            "hbase.hregion.memstore.flush.size": "134217728", 
-            "hbase.superuser": "hbase", 
-            "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController", 
-            "hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine", 
-            "hbase.hregion.max.filesize": "10737418240", 
-            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
-            "zookeeper.session.timeout": "60000", 
-            "hbase.tmp.dir": "/hadoop/hbase", 
-            "hbase.regionserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", 
-            "hfile.block.cache.size": "0.40", 
-            "hbase.security.authentication": "kerberos", 
-            "hbase.zookeeper.quorum": "c6401.ambari.apache.org", 
-            "zookeeper.znode.parent": "/hbase-secure", 
-            "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController", 
-            "hbase.hstore.blockingStoreFiles": "10", 
-            "hbase.hregion.majorcompaction": "86400000", 
-            "hbase.security.authorization": "true", 
-            "hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab", 
-            "hbase.cluster.distributed": "true", 
-            "hbase.hregion.memstore.mslab.enabled": "true", 
-            "hbase.client.scanner.caching": "100", 
-            "hbase.zookeeper.useMulti": "true"
-        }, 
-        "core-site": {
-            "fs.default.name": "hdfs://c6401.ambari.apache.org:8020", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "hadoop.proxyuser.HTTP.hosts": "c6402.ambari.apache.org", 
-            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
-            "fs.checkpoint.period": "21600", 
-            "hadoop.proxyuser.hcat.groups": "users", 
-            "fs.checkpoint.size": "67108864", 
-            "fs.trash.interval": "360", 
-            "hadoop.proxyuser.hive.groups": "users", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "hadoop.security.authentication": "kerberos", 
-            "fs.checkpoint.edits.dir": "${fs.checkpoint.dir}", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "io.compression.codec.lzo.class": "com.hadoop.compression.lzo.LzoCodec", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "webinterface.private.actions": "false", 
-            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
-            "hadoop.proxyuser.oozie.groups": "users", 
-            "hadoop.security.authorization": "true", 
-            "fs.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[2:$1@$0](jt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](tt@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nDEFAULT", 
-            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
-            "ipc.client.connection.maxidletime": "30000"
-        }, 
-        "hive-site": {
-            "hive.enforce.sorting": "true", 
-            "javax.jdo.option.ConnectionPassword": "q", 
-            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
-            "hive.optimize.bucketmapjoin.sortedmerge": "true", 
-            "fs.file.impl.disable.cache": "true", 
-            "hive.auto.convert.join.noconditionaltask": "true", 
-            "hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM", 
-            "hive.optimize.bucketmapjoin": "true", 
-            "hive.map.aggr": "true", 
-            "hive.security.authorization.enabled": "true", 
-            "hive.optimize.reducededuplication.min.reducer": "1", 
-            "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab", 
-            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
-            "hive.mapjoin.bucket.cache.size": "10000", 
-            "hive.auto.convert.join.noconditionaltask.size": "1000000000", 
-            "javax.jdo.option.ConnectionUserName": "hive", 
-            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", 
-            "hive.server2.authentication": "KERBEROS", 
-            "hive.metastore.sasl.enabled": "true", 
-            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
-            "hive.metastore.client.socket.timeout": "60", 
-            "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM", 
-            "hive.semantic.analyzer.factory.impl": "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory", 
-            "hive.auto.convert.join": "true", 
-            "hive.enforce.bucketing": "true", 
-            "hive.mapred.reduce.tasks.speculative.execution": "false", 
-            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
-            "hive.auto.convert.sortmerge.join": "true", 
-            "fs.hdfs.impl.disable.cache": "true", 
-            "hive.security.authorization.manager": "org.apache.hcatalog.security.HdfsAuthorizationProvider", 
-            "ambari.hive.db.schema.name": "hive", 
-            "hive.metastore.execute.setugi": "true", 
-            "hive.auto.convert.sortmerge.join.noconditionaltask": "true", 
-            "hive.server2.enable.doAs": "true", 
-            "hive.optimize.mapjoin.mapreduce": "true", 
-            "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab"
-        }
-    }, 
-    "configurationTags": {
-        "mapred-site": {
-            "tag": "version1389980437965"
-        }, 
-        "oozie-site": {
-            "tag": "version1389980437966"
-        }, 
-        "webhcat-site": {
-            "tag": "version1389980437965"
-        }, 
-        "global": {
-            "tag": "version1389980437965"
-        }, 
-        "hdfs-site": {
-            "tag": "version1389980437965"
-        }, 
-        "hbase-site": {
-            "tag": "version1389980437965"
-        }, 
-        "core-site": {
-            "tag": "version1389980437965"
-        }, 
-        "hive-site": {
-            "tag": "version1389980437965"
-        }
-    }, 
-    "commandId": "4-2", 
-    "clusterHostInfo": {
-        "snamenode_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "ganglia_monitor_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "nagios_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hive_metastore_hosts": [
-            "c6402.ambari.apache.org"
-        ], 
-        "all_ping_ports": [
-            "8670", 
-            "8670"
-        ], 
-        "mapred_tt_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "all_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "hbase_rs_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "slave_hosts": [
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "namenode_host": [
-            "c6401.ambari.apache.org"
-        ], 
-        "ganglia_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hbase_master_hosts": [
-            "c6401.ambari.apache.org"
-        ], 
-        "hive_mysql_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "oozie_server": [
-            "c6402.ambari.apache.org"
-        ], 
-        "webhcat_server_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "jtnode_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "zookeeper_hosts": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hs_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "hive_server_host": [
-            "c6402.ambari.apache.org"
-        ]
-    }
-}
\ No newline at end of file

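Fixtures like secured.json are what the tests reference via config_file (see the deleted tests below, which use config_file="default.json"): RMFTestCase conceptually parses the JSON and hands it to Script.get_config(), so params modules read values straight out of it. A sketch of inspecting such a fixture directly:

    # Sketch: read a couple of the values params.py consumes from this
    # fixture's configurations and clusterHostInfo sections.
    import json

    with open("secured.json") as fixture:
        config = json.load(fixture)

    security_enabled = config["configurations"]["global"]["security_enabled"]
    rs_hosts = config["clusterHostInfo"]["hbase_rs_hosts"]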
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/hooks/before-INSTALL/test_before_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/1.3.3/hooks/before-INSTALL/test_before_install.py
deleted file mode 100644
index 4b385bb..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/hooks/before-INSTALL/test_before_install.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from stacks.utils.RMFTestCase import *
-
-
-class TestHookBeforeInstall(RMFTestCase):
-  def test_hook_default(self):
-    self.executeScript("1.3.3/hooks/before-INSTALL/scripts/hook.py",
-                       classname="BeforeConfigureHook",
-                       command="hook",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Group', 'hadoop',)
-    self.assertResourceCalled('Group', 'users',)
-    self.assertResourceCalled('Group', 'users',)
-    self.assertResourceCalled('User', 'ambari-qa',
-                              gid='hadoop',
-                              groups=[u'users'],)
-    self.assertResourceCalled('File', '/tmp/changeUid.sh',
-                              content=StaticFile('changeToSecureUid.sh'),
-                              mode=0555,)
-    self.assertResourceCalled('Execute',
-                              '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 2>/dev/null',
-                              not_if='test $(id -u ambari-qa) -gt 1000',)
-    self.assertResourceCalled('User', 'hbase',
-                              gid='hadoop',
-                              groups=[u'hadoop'],)
-    self.assertResourceCalled('File', '/tmp/changeUid.sh',
-                              content=StaticFile('changeToSecureUid.sh'),
-                              mode=0555,)
-    self.assertResourceCalled('Execute',
-                              '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/hadoop/hbase 2>/dev/null',
-                              not_if='test $(id -u hbase) -gt 1000',)
-    self.assertResourceCalled('Group', 'nagios',)
-    self.assertResourceCalled('User', 'nagios', gid='nagios',)
-    self.assertResourceCalled('User', 'oozie', gid='hadoop',)
-    self.assertResourceCalled('User', 'hcat', gid='hadoop',)
-    self.assertResourceCalled('User', 'hcat', gid='hadoop',)
-    self.assertResourceCalled('User', 'hive',
-                              gid='hadoop',)
-    self.assertResourceCalled('Group', 'nobody',)
-    self.assertResourceCalled('Group', 'nobody',)
-    self.assertResourceCalled('User', 'nobody',
-                              gid='hadoop',
-                              groups=[u'nobody'],)
-    self.assertResourceCalled('User', 'nobody',
-                              gid='hadoop',
-                              groups=[u'nobody'],)
-    self.assertResourceCalled('User', 'hdfs',
-                              gid='hadoop',
-                              groups=[u'hadoop'],)
-    self.assertResourceCalled('User', 'mapred',
-                              gid='hadoop',
-                              groups=[u'hadoop'],)
-    self.assertResourceCalled('User', 'zookeeper',
-                              gid='hadoop',)
-    self.assertResourceCalled('Package', 'unzip',)
-    self.assertResourceCalled('Package', 'net-snmp',)
-    self.assertResourceCalled('Package', 'net-snmp-utils',)
-    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/1.3.3/hooks/before-START/test_before_start.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.3/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/1.3.3/hooks/before-START/test_before_start.py
deleted file mode 100644
index 6829655..0000000
--- a/ambari-server/src/test/python/stacks/1.3.3/hooks/before-START/test_before_start.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from stacks.utils.RMFTestCase import *
-
-class TestHookBeforeStart(RMFTestCase):
-  def test_hook_default(self):
-    self.executeScript("1.3.3/hooks/before-START/scripts/hook.py",
-                       classname="BeforeConfigureHook",
-                       command="hook",
-                       config_file="default.json"
-    )
-    self.assertResourceCalled('Execute', 'mkdir -p /tmp/HDP-artifacts/ ; curl -kf --retry 10 http://c6401.ambari.apache.org:8080/resources//jdk-7u45-linux-x64.tar.gz -o /tmp/HDP-artifacts//jdk-7u45-linux-x64.tar.gz',
-                              not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
-                              path = ['/bin', '/usr/bin/'],
-                              )
-    self.assertResourceCalled('Execute', 'mkdir -p /usr/jdk64 ; cd /usr/jdk64 ; tar -xf /tmp/HDP-artifacts//jdk-7u45-linux-x64.tar.gz > /dev/null 2>&1',
-                              not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
-                              path = ['/bin', '/usr/bin/'],
-                              )
-    self.assertResourceCalled('Execute', 'mkdir -p /tmp/HDP-artifacts/; curl -kf --retry 10 http://c6401.ambari.apache.org:8080/resources//UnlimitedJCEPolicyJDK7.zip -o /tmp/HDP-artifacts//UnlimitedJCEPolicyJDK7.zip',
-                              not_if = 'test -e /tmp/HDP-artifacts//UnlimitedJCEPolicyJDK7.zip',
-                              ignore_failures = True,
-                              path = ['/bin', '/usr/bin/'],
-                              )
-    self.assertResourceCalled('File', '/etc/snmp/snmpd.conf',
-                              content = Template('snmpd.conf.j2'),
-                              )
-    self.assertResourceCalled('Service', 'snmpd',
-                              action = ['restart'],
-                              )
-    self.assertResourceCalled('Execute', '/bin/echo 0 > /selinux/enforce',
-                              only_if = 'test -f /selinux/enforce',
-                              )
-    self.assertResourceCalled('Execute', 'mkdir -p /usr/lib/hadoop/lib/native/Linux-i386-32; ln -sf /usr/lib/libsnappy.so /usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
-                              )
-    self.assertResourceCalled('Execute', 'mkdir -p /usr/lib/hadoop/lib/native/Linux-amd64-64; ln -sf /usr/lib64/libsnappy.so /usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
-                              )
-    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
-                              owner = 'root',
-                              group = 'root',
-                              recursive = True,
-                              )
-    self.assertResourceCalled('Directory', '/var/log/hadoop',
-                              owner = 'root',
-                              group = 'root',
-                              recursive = True,
-                              )
-    self.assertResourceCalled('Directory', '/var/run/hadoop',
-                              owner = 'root',
-                              group = 'root',
-                              recursive = True,
-                              )
-    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
-                              content = Template('hdfs.conf.j2'),
-                              owner = 'root',
-                              group = 'root',
-                              mode = 0644,
-                              )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
-                              content = Template('taskcontroller.cfg.j2'),
-                              owner = 'hdfs',
-                              )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-env.sh',
-                              content = Template('hadoop-env.sh.j2'),
-                              owner = 'hdfs',
-                              )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/commons-logging.properties',
-                              content = Template('commons-logging.properties.j2'),
-                              owner = 'hdfs',
-                              )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
-                              content = Template('slaves.j2'),
-                              owner = 'hdfs',
-                              )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/health_check',
-                              content = Template('health_check.j2'),
-                              owner = 'hdfs',
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.driver=.*~ambari.jobhistory.driver=org.postgresql.Driver~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA=.*~log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.driver=.*~log4j.appender.JHA.driver=${ambari.jobhistory.driver}~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.database=.*~log4j.appender.JHA.database=${ambari.jobhistory.database}~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.logger=.*~ambari.jobhistory.logger=DEBUG,JHA~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.password=.*~log4j.appender.JHA.password=${ambari.jobhistory.password}~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.database=.*~ambari.jobhistory.database=jdbc:postgresql://c6401.ambari.apache.org/ambarirca~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=.*~log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=.*~log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?log4j.appender.JHA.user=.*~log4j.appender.JHA.user=${ambari.jobhistory.user}~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.user=.*~ambari.jobhistory.user=mapred~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('Execute', "sed -i 's~\\(###\\)\\?ambari.jobhistory.password=.*~ambari.jobhistory.password=mapred~' /etc/hadoop/conf/log4j.properties",
-                              )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-metrics2.properties',
-                              content = Template('hadoop-metrics2.properties.j2'),
-                              owner = 'hdfs',
-                              )
-    self.assertResourceCalled('XmlConfig', 'core-site.xml',
-                              owner = 'hdfs',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
-                              configurations = self.getConfig()['configurations']['core-site'],
-                              )
-    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
-                              owner = 'mapred',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
-                              configurations = self.getConfig()['configurations']['mapred-site'],
-                              )
-    self.assertResourceCalled('File', '/etc/hadoop/conf/task-log4j.properties',
-                              content = StaticFile('task-log4j.properties'),
-                              mode = 0755,
-                              )
-    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
-                              owner = 'hdfs',
-                              group = 'hadoop',
-                              conf_dir = '/etc/hadoop/conf',
-                              configurations = self.getConfig()['configurations']['hdfs-site'],
-                              )
-    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/hadoop-tools.jar',
-                              to = '/usr/lib/hadoop/hadoop-tools.jar',
-                              )
-    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
new file mode 100644
index 0000000..79c24ee
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from stacks.utils.RMFTestCase import *
+
+
+class TestGangliaMonitor(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+                       classname="GangliaMonitor",
+                       command="configure",
+                       config_file="default.json"
+                      )
+    self.assertResourceCalled('Directory', '/etc/ganglia/hdp',
+                              owner = 'root',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
+                              content = StaticFile('gmetad.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
+                              content = StaticFile('gmond.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
+                              content = StaticFile('checkGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
+                              content = StaticFile('checkRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
+                              content = StaticFile('gmetadLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
+                              content = StaticFile('gmondLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
+                              content = StaticFile('rrdcachedLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
+                              content = StaticFile('setupGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
+                              content = StaticFile('startGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
+                              content = StaticFile('startGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
+                              content = StaticFile('startRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
+                              content = StaticFile('stopGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
+                              content = StaticFile('stopGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
+                              content = StaticFile('stopRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
+                              content = StaticFile('teardownGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Directory', '/etc/ganglia/conf.d',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/conf.d/modgstatus.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/conf.d/multicpu.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/gmond.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+                       classname="GangliaMonitor",
+                       command="start",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'chkconfig gmond off',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', 'service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+                       classname="GangliaMonitor",
+                       command="stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertNoMoreResources()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
new file mode 100644
index 0000000..870bc22
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from stacks.utils.RMFTestCase import *
+
+
+class TestGangliaServer(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_server.py",
+                       classname="GangliaServer",
+                       command="configure",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
+                              content = StaticFile('gmetad.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
+                              content = StaticFile('gmond.init'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
+                              content = StaticFile('checkGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
+                              content = StaticFile('checkRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
+                              content = StaticFile('gmetadLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
+                              content = StaticFile('gmondLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
+                              content = StaticFile('rrdcachedLib.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
+                              content = StaticFile('setupGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
+                              content = StaticFile('startGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
+                              content = StaticFile('startGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
+                              content = StaticFile('startRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
+                              content = StaticFile('stopGmetad.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
+                              content = StaticFile('stopGmond.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
+                              content = StaticFile('stopRrdcached.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
+                              content = StaticFile('teardownGanglia.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
+                              owner = 'root',
+                              template_tag = None,
+                              group = 'root',
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPResourceManager -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNodeManager -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHistoryServer -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPDataNode -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseRegionServer -m -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -t -o root -g hadoop',
+                              path = ['/usr/libexec/hdp/ganglia',
+                                      '/usr/sbin',
+                                      '/sbin:/usr/local/bin',
+                                      '/bin',
+                                      '/usr/bin'],
+                              )
+    self.assertResourceCalled('Directory', '/var/lib/ganglia/dwoo',
+                              owner = 'nobody',
+                              recursive = True,
+                              mode = 0777,
+                              )
+    self.assertResourceCalled('Directory', '/srv/www/cgi-bin',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/srv/www/cgi-bin/rrd.py',
+                              content = StaticFile('rrd.py'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('File', '/etc/ganglia/gmetad.conf',
+                              owner = 'root',
+                              group = 'hadoop',
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_server.py",
+                       classname="GangliaServer",
+                       command="start",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertResourceCalled('MonitorWebserver', 'restart',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_server.py",
+                       classname="GangliaServer",
+                       command="stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              )
+    self.assertResourceCalled('MonitorWebserver', 'restart',
+                              )
+    self.assertNoMoreResources()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
new file mode 100644
index 0000000..2d8bd77
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHBaseClient(RMFTestCase):
+  
+  def test_configure_secured(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_client.py",
+                   classname = "HbaseClient",
+                   command = "configure",
+                   config_file="secured.json"
+    )
+    
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_client_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertNoMoreResources()
+    
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_client.py",
+                   classname = "HbaseClient",
+                   command = "configure",
+                   config_file="default.json"
+    )
+    
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )    
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )    
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )    
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )    
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )    
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )    
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
new file mode 100644
index 0000000..9032f38
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+class TestHBaseMaster(RMFTestCase):
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "configure",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+    
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "start",
+                   config_file="default.json"
+    )
+    
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
+      user = 'hbase'
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "stop",
+                   config_file="default.json"
+    )
+    
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master && rm -f /var/run/hbase/hbase-hbase-master.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+
+  def test_decom_default(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                       classname = "HbaseMaster",
+                       command = "decommission",
+                       config_file="default.json"
+    )
+
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host2',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "configure",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+    
+  def test_start_secured(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "start",
+                   config_file="secured.json"
+    )
+    
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_secured(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "stop",
+                   config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master && rm -f /var/run/hbase/hbase-hbase-master.pid',
+      not_if = None,
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+
+  def test_decom_secure(self):
+    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+                       classname = "HbaseMaster",
+                       command = "decommission",
+                       config_file="secured.json"
+    )
+
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )   
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )  
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-MASTER',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )   
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )    
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+  
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'], # don't hardcode all the properties
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-MASTER',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_master_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
\ No newline at end of file


[22/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn-env.sh.j2
new file mode 100644
index 0000000..70bb71a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn-env.sh.j2
@@ -0,0 +1,119 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# To set YARN-specific heap sizes, use this
+# parameter and set it appropriately.
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
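+# For example, a yarn_heapsize of 1024 rendered above would set
+# JAVA_HEAP_MAX to -Xmx1024m (1024 is a hypothetical value; the real one
+# comes from the cluster configuration).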
+
+# Resource Manager specific parameters
+
+# Specify the maximum heap size for the ResourceManager as a numerical value
+# in MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an -Xmx setting specified in YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX, with YARN_HEAPMAX preferred.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
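+# For example, a hypothetical "export YARN_RESOURCEMANAGER_OPTS=-Xmx2048m"
+# below would win over this value, since those opts are appended after the
+# heap flag and the JVM generally honors the last -Xmx it is given.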
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the maximum heap size for the NodeManager as a numerical value
+# in MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an -Xmx setting specified in YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX, with YARN_HEAPMAX preferred.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn.conf.j2
new file mode 100644
index 0000000..be89b07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/templates/yarn.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}}   - nofile 32768
+{{yarn_user}}   - nproc  65536
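+
+# A sketch of the rendered output, assuming yarn_user renders as "yarn":
+#   yarn   - nofile 32768
+#   yarn   - nproc  65536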

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
new file mode 100644
index 0000000..8fb48b9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>log4j.appender.CONSOLE</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.CONSOLE.Threshold</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.appender.CONSOLE.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.CONSOLE.layout.ConversionPattern</name>
+    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.Threshold</name>
+    <value>DEBUG</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.File</name>
+    <value>zookeeper.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.MaxFileSize</name>
+    <value>10MB</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.ROLLINGFILE.layout.ConversionPattern</name>
+    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE</name>
+    <value>org.apache.log4j.FileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.Threshold</name>
+    <value>TRACE</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.File</name>
+    <value>zookeeper_trace.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.TRACEFILE.layout.ConversionPattern</name>
+    <value>%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
index 565b4d4..3e048a6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
@@ -16,24 +16,56 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.2.0.6.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.5.2.1.1</version>
+      <components>
 
-    <components>
         <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
+          <name>ZOOKEEPER_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
+          <name>ZOOKEEPER_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
+      </components>
 
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
 
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>zookeeper-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh
new file mode 100644
index 0000000..07017e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to set up the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"
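
One detail worth noting in the CLASSPATH assembly above: every jar is prepended (CLASSPATH="$i:$CLASSPATH"), so entries added later take precedence, with build/classes ending up first. A plain-Python sketch of the same precedence order (paths are illustrative assumptions, not part of this commit):

    import glob, os

    def build_classpath(zoobindir, zoocfgdir, base=""):
        # Mirror the shell logic: each entry is prepended, so entries
        # added later (build/classes last) end up earliest on the path.
        cp = zoocfgdir + ":" + base
        for pattern in ("../src/java/lib/*.jar", "../lib/*.jar",
                        "../zookeeper-*.jar", "../build/lib/*.jar"):
            for jar in glob.glob(os.path.join(zoobindir, pattern)):
                cp = jar + ":" + cp
        return os.path.join(zoobindir, "../build/classes") + ":" + cp

    print(build_classpath("/usr/lib/zookeeper/bin", "/etc/zookeeper/conf"))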

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh
new file mode 100644
index 0000000..49ceb4d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+    JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ]
+then
+    echo "JMX enabled by default"
+    # for some reason these two options are necessary on jdk6 on Ubuntu
+    #   according to the docs they are not necessary, but otherwise jconsole
+    #   cannot do a local attach
+    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+else
+    echo "JMX disabled by user request"
+    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+# Only follow symlinks if readlink supports it
+if readlink -f "$0" > /dev/null 2>&1
+then
+  ZOOBIN=`readlink -f "$0"`
+else
+  ZOOBIN="$0"
+fi
+ZOOBINDIR=`dirname "$ZOOBIN"`
+
+. "$ZOOBINDIR"/zkEnv.sh
+
+if [ "x$2" != "x" ]
+then
+    ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+if $cygwin
+then
+    ZOOCFG=`cygpath -wp "$ZOOCFG"`
+    # cygwin has a "kill" in the shell itself, gets confused
+    KILL=/bin/kill
+else
+    KILL=kill
+fi
+
+echo "Using config: $ZOOCFG"
+
+ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
+
+
+case $1 in
+start)
+    echo  "Starting zookeeper ... "
+    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
+    /bin/echo -n $! > "$ZOOPIDFILE"
+    echo STARTED
+    ;;
+stop)
+    echo "Stopping zookeeper ... "
+    if [ ! -f "$ZOOPIDFILE" ]
+    then
+    echo "error: could not find file $ZOOPIDFILE"
+    exit 1
+    else
+    $KILL -9 $(cat "$ZOOPIDFILE")
+    rm "$ZOOPIDFILE"
+    echo STOPPED
+    fi
+    ;;
+upgrade)
+    shift
+    echo "upgrading the servers to 3.*"
+    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+    echo "Upgrading ... "
+    ;;
+restart)
+    shift
+    "$0" stop ${@}
+    sleep 3
+    "$0" start ${@}
+    ;;
+status)
+    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
+    if [ "x$STAT" = "x" ]
+    then
+        echo "Error contacting service. It is probably not running."
+    else
+        echo $STAT
+    fi
+    ;;
+*)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+
+esac
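
The status) branch above relies on ZooKeeper's four-letter "stat" command over the client port and scrapes the "Mode:" line. A minimal plain-socket approximation, assuming a default localhost:2181 endpoint (the defaults are assumptions, not part of this commit):

    import socket

    def zk_mode(host="localhost", port=2181, timeout=5.0):
        # Send the four-letter "stat" command and return the "Mode:" line,
        # the same probe as `echo stat | nc` in the status) branch above.
        try:
            sock = socket.create_connection((host, port), timeout)
        except socket.error:
            return None
        try:
            sock.sendall(b"stat")
            data = b""
            while True:
                chunk = sock.recv(4096)
                if not chunk:
                    break
                data += chunk
        except socket.error:
            return None
        finally:
            sock.close()
        for line in data.decode("ascii", "replace").splitlines():
            if line.startswith("Mode:"):
                return line
        return None

    print(zk_mode() or "Error contacting service. It is probably not running.")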

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh
new file mode 100644
index 0000000..32dfce4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zkcli_script=$1
+user=$2
+conf_dir=$3
+su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh
new file mode 100644
index 0000000..c1c11b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smoke_script=$1
+smoke_user=$2
+conf_dir=$3
+client_port=$4
+security_enabled=$5
+kinit_path_local=$6
+smoke_user_keytab=$7
+export ZOOKEEPER_EXIT_CODE=0
+test_output_file=/tmp/zkSmoke.out
+errors_expr="ERROR|Exception"
+acceptable_expr="SecurityException"
+zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
+zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
+echo "zk_node1=$zk_node1"
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
+  su - $smoke_user -c "$kinitcmd"
+fi
+
+function verify_output() {
+  if [ -f $test_output_file ]; then
+    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
+    if [ "$?" -eq 0 ]; then
+      echo "Error found in the zookeeper smoke test. Exiting."
+      echo $errors
+      exit 1
+    fi
+  fi
+}
+
+# Delete /zk_smoketest znode if exists
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" >$test_output_file 2>&1
+# Create /zk_smoketest znode on one zookeeper server
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" >>$test_output_file 2>&1
+verify_output
+
+for i in $zkhosts ; do
+  echo "Running test on host $i"
+  # Verify the data associated with znode across all the nodes in the zookeeper quorum
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
+  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
+  echo $output | grep smoke_data
+  if [[ $? -ne 0 ]] ; then
+    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
+    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
+  fi
+done
+
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
+if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
+  echo "Zookeeper Smoke Test: Failed" 
+else
+   echo "Zookeeper Smoke Test: Passed" 
+fi
+exit $ZOOKEEPER_EXIT_CODE
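
The verify_output function above is essentially a two-stage grep: keep lines matching the error pattern, drop lines matching the acceptable pattern. A small self-contained sketch of that filter (the sample log lines are made up for illustration):

    import re

    def find_errors(text, errors_expr="ERROR|Exception",
                    acceptable_expr="SecurityException"):
        # Mirrors verify_output: flag lines matching the error pattern
        # unless they also match the acceptable pattern.
        return [line for line in text.splitlines()
                if re.search(errors_expr, line)
                and not re.search(acceptable_expr, line)]

    sample = ("INFO connected\n"
              "ERROR failed to create znode\n"
              "java.lang.SecurityException: ignorable\n")
    print(find_errors(sample))  # -> ['ERROR failed to create znode']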

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
new file mode 100644
index 0000000..3c02248
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+config_dir = "/etc/zookeeper/conf"
+zk_user =  config['configurations']['global']['zk_user']
+hostname = config['hostname']
+zk_bin = '/usr/lib/zookeeper/bin'
+user_group = config['configurations']['global']['user_group']
+
+smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+
+zk_log_dir = config['configurations']['global']['zk_log_dir']
+zk_data_dir = config['configurations']['global']['zk_data_dir']
+zk_pid_dir = status_params.zk_pid_dir
+zk_pid_file = status_params.zk_pid_file
+zk_server_heapsize = "-Xmx1024m"
+
+tickTime = config['configurations']['global']['tickTime']
+initLimit = config['configurations']['global']['initLimit']
+syncLimit = config['configurations']['global']['syncLimit']
+clientPort = config['configurations']['global']['clientPort']
+
+if 'zoo.cfg' in config['configurations']:
+  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+  zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zk_primary_name = "zookeeper"
+zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
+zk_principal = zk_principal_name.replace('_HOST',hostname)
+
+java64_home = config['hostLevelParams']['java_home']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+keytab_path = "/etc/security/keytabs"
+zk_keytab_path = format("{keytab_path}/zk.service.keytab")
+zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
+zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
+security_enabled = config['configurations']['global']['security_enabled']
+
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#log4j.properties
+if ('zookeeper-log4j' in config['configurations']):
+  log4j_props = config['configurations']['zookeeper-log4j']
+else:
+  log4j_props = None
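
Lines like format("{keytab_path}/zk.service.keytab") above depend on resource_management's format() resolving names from the caller's variables rather than from explicit arguments. A simplified stand-in conveying the idea (this is an assumption-laden sketch, not the actual resource_management implementation):

    import string, sys

    def scoped_format(fmt, **kwargs):
        # Resolve {name} first from keyword args, then from the caller's
        # local and global variables.
        frame = sys._getframe(1)
        scope = dict(frame.f_globals)
        scope.update(frame.f_locals)
        scope.update(kwargs)
        return string.Formatter().vformat(fmt, (), scope)

    keytab_path = "/etc/security/keytabs"
    print(scoped_format("{keytab_path}/zk.service.keytab"))
    # -> /etc/security/keytabs/zk.service.keytab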

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py
new file mode 100644
index 0000000..6b3553d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ZookeeperServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File("/tmp/zkSmoke.sh",
+         mode=0755,
+         content=StaticFile('zkSmoke.sh')
+    )
+
+    cmd_quorum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
+                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
+                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd_quorum,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+if __name__ == "__main__":
+  ZookeeperServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py
new file mode 100644
index 0000000..98f2903
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+zk_pid_dir = config['configurations']['global']['zk_pid_dir']
+zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py
new file mode 100644
index 0000000..ac69829
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+
+def zookeeper(type = None):
+  import params
+
+  Directory(params.config_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  configFile("zoo.cfg", template_name="zoo.cfg.j2")
+  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
+  configFile("configuration.xsl", template_name="configuration.xsl.j2")
+
+  Directory(params.zk_pid_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_log_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_data_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  if type == 'server':
+    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
+
+    File(format("{zk_data_dir}/myid"),
+         mode = 0644,
+         content = myid
+    )
+
+  if params.log4j_props is not None:
+    PropertiesFile('log4j.properties',
+                   dir=params.config_dir,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.zk_user,
+                   group=params.user_group,
+    )
+  elif (os.path.exists(format("{params.config_dir}/log4j.properties"))):
+    File(format("{params.config_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.zk_user
+    )
+
+  if params.security_enabled:
+    if type == "server":
+      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+    else:
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+
+  File(format("{config_dir}/zoo_sample.cfg"),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+def configFile(name, template_name=None):
+  import params
+
+  File(format("{config_dir}/{name}"),
+       content=Template(template_name),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+
+
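
The myid computation above is the whole quorum-identity scheme: a server's id is its 1-based position in the sorted host list, which is also the order zoo.cfg.j2 (later in this commit) uses for its server.{{loop.index}} entries, so the two always agree. A tiny worked example with made-up hostnames:

    hosts = ["c6402.ambari.apache.org",
             "c6401.ambari.apache.org",
             "c6403.ambari.apache.org"]
    hostname = "c6402.ambari.apache.org"
    # 1-based position in the sorted quorum list
    myid = str(sorted(hosts).index(hostname) + 1)
    print(myid)  # -> "2", matching the server.2 line in zoo.cfg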

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py
new file mode 100644
index 0000000..028a37d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+
+class ZookeeperClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    zookeeper(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py
new file mode 100644
index 0000000..e8cc264
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+from zookeeper_service import zookeeper_service
+
+class ZookeeperServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    zookeeper(type='server')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    zookeeper_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    zookeeper_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.zk_pid_file)
+
+if __name__ == "__main__":
+  ZookeeperServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py
new file mode 100644
index 0000000..83b8f08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def zookeeper_service(action='start'):
+  import params
+
+  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
+
+  if action == 'start':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
+    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.zk_user
+    )
+  elif action == 'stop':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
+    rm_pid = format("rm -f {zk_pid_file}")
+    Execute(daemon_cmd,
+            user=params.zk_user
+    )
+    Execute(rm_pid)
\ No newline at end of file
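
The not_if guard used for 'start' above makes the Execute idempotent: the daemon command is skipped when the pid file exists and the process it names is alive. A plain-Python approximation of that check (the pid-file path is an example, not resource_management internals):

    import subprocess

    def already_running(pid_file="/var/run/zookeeper/zookeeper_server.pid"):
        no_op_test = ("ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1"
                      % (pid_file, pid_file))
        # Execute(daemon_cmd, not_if=no_op_test) runs the daemon command
        # only when this test fails, i.e. no live process owns the pid file.
        return subprocess.call(no_op_test, shell=True) == 0

    if not already_running():
        print("would run: zkServer.sh start")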

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2
new file mode 100644
index 0000000..c003ba2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+  <tr>
+     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+     <td><xsl:value-of select="value"/></td>
+     <td><xsl:value-of select="description"/></td>
+  </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2
new file mode 100644
index 0000000..5b68218
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2
@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The number of milliseconds of each tick
+tickTime={{tickTime}}
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit={{initLimit}}
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit={{syncLimit}}
+# the directory where the snapshot is stored.
+dataDir={{zk_data_dir}}
+# the port at which the clients will connect
+clientPort={{clientPort}}
+{% for host in zookeeper_hosts %}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
+
+{% if security_enabled %}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}
+
+{% if zoo_cfg_properties_map_length > 0 %}
+# Custom properties
+{% endif %}
+{% for key, value in zoo_cfg_properties_map.iteritems() %}
+{{key}}={{value}}
+{% endfor %}
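
Since loop.index in Jinja2 is 1-based, the server.N entries rendered by the template above line up with the 1-based myid values written by zookeeper.py. A quick hedged sketch of the rendering, assuming the jinja2 package is available and using example hostnames:

    from jinja2 import Template

    src = ("tickTime={{tickTime}}\n"
           "{% for host in zookeeper_hosts %}"
           "server.{{loop.index}}={{host}}:2888:3888\n"
           "{% endfor %}")
    print(Template(src).render(
        tickTime=2000,
        zookeeper_hosts=["zk1.example.com", "zk2.example.com"]))
    # tickTime=2000
    # server.1=zk1.example.com:2888:3888
    # server.2=zk2.example.com:2888:3888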

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
new file mode 100644
index 0000000..493a2a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export JAVA_HOME={{java64_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
new file mode 100644
index 0000000..696718e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
@@ -0,0 +1,5 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
new file mode 100644
index 0000000..aa123e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
@@ -0,0 +1,8 @@
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{zk_keytab_path}}"
+principal="{{zk_principal}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh
deleted file mode 100644
index 4872a10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index 51e5cd2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_users()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index dc6d770..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-zk_user = config['configurations']['global']['zk_user']
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-storm_user = config['configurations']['global']['storm_user']
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group =  "users"
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index cf6c2c5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  Group(params.user_group)
-  Group(params.smoke_user_group)
-  Group(params.proxyuser_group)
-  User(params.smoke_user,
-       gid=params.user_group,
-       groups=[params.proxyuser_group]
-  )
-  smoke_user_dirs = format(
-    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-  set_uid(params.smoke_user, smoke_user_dirs)
-
-  if params.has_hbase_masters:
-    User(params.hbase_user,
-         gid = params.user_group,
-         groups=[params.user_group])
-    hbase_user_dirs = format(
-      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-    set_uid(params.hbase_user, hbase_user_dirs)
-
-  if params.has_nagios:
-    Group(params.nagios_group)
-    User(params.nagios_user,
-         gid=params.nagios_group)
-
-  if params.has_oozie_server:
-    User(params.oozie_user,
-         gid = params.user_group)
-
-  if params.has_hcat_server_host:
-    User(params.webhcat_user,
-         gid = params.user_group)
-    User(params.hcat_user,
-         gid = params.user_group)
-
-  if params.has_hive_server_host:
-    User(params.hive_user,
-         gid = params.user_group)
-
-  if params.has_resourcemanager:
-    User(params.yarn_user,
-         gid = params.user_group)
-
-  if params.has_ganglia_server:
-    Group(params.gmetad_user)
-    Group(params.gmond_user)
-    User(params.gmond_user,
-         gid=params.user_group,
-        groups=[params.gmond_user])
-    User(params.gmetad_user,
-         gid=params.user_group,
-        groups=[params.gmetad_user])
-
-  User(params.hdfs_user,
-        gid=params.user_group,
-        groups=[params.user_group]
-  )
-  User(params.mapred_user,
-       gid=params.user_group,
-       groups=[params.user_group]
-  )
-  if params.has_zk_host:
-    User(params.zk_user,
-         gid=params.user_group)
-
-  if params.has_storm_server:
-    User(params.storm_user,
-         gid=params.user_group,
-         groups=[params.user_group]
-    )
-
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  File("/tmp/changeUid.sh",
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
-          not_if = format("test $(id -u {user}) -gt 1000"))
-
-def install_packages():
-  Package("unzip")
-  Package("net-snmp")
-  Package("net-snmp-utils")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index c8939fc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py
deleted file mode 100644
index e11bfac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-# TODO: make this the "CONFIGURE" hook once the CONFIGURE command is implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_java()
-    setup_hadoop()
-    setup_configs()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()
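
The params.py this hook imports (removed next) is built almost entirely from the
default() helper, which resolves a "/"-separated path against the command json. A
rough reimplementation to show the semantics assumed below; the shipped helper
lives in resource_management.libraries.functions:

from resource_management import Script

def default(path, default_value):
  # Walk "/clusterHostInfo/rm_host"-style paths through the command json,
  # falling back to default_value at the first missing key.
  curr = Script.get_config()
  for key in path.strip("/").split("/"):
    if not isinstance(curr, dict) or key not in curr:
      return default_value
    curr = curr[key]
  return curr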

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py
deleted file mode 100644
index 1f943df..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#java params
-artifact_dir = "/tmp/HDP-artifacts/"
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#users and groups
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-yarn_user = config['configurations']['global']['yarn_user']
-
-user_group = config['configurations']['global']['user_group']
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#snmp
-snmp_conf_dir = "/etc/snmp/"
-snmp_source = "0.0.0.0/0"
-snmp_community = "hadoop"
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(nagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-#hadoop params
-hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/sbin"
-
-task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-limits_conf_dir = "/etc/security/limits.d"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-rca_enabled = config['configurations']['global']['rca_enabled']
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-if System.get_instance().os_family == "suse":
-  jsvc_path = "/usr/lib/bigtop-utils"
-else:
-  jsvc_path = "/usr/libexec/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-#hdfs ha properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-if dfs_ha_enabled:
-  namenode_id = None
-  for nn_id in dfs_ha_namenode_ids.split(","):
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-rca_property_map = {
-  'ambari.jobhistory.database': ambari_db_rca_url,
-  'ambari.jobhistory.driver': ambari_db_rca_driver,
-  'ambari.jobhistory.user': ambari_db_rca_username,
-  'ambari.jobhistory.password': ambari_db_rca_password,
-  'ambari.jobhistory.logger': 'DEBUG,JHA',
-
-  'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
-  'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
-  'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
-  'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
-  'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
-
-  'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
-  'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
-}
-
-if 'hdfs-log4j' in config['configurations']:
-  log4j_props = dict(config['configurations']['hdfs-log4j'])
-  if 'yarn-log4j' in config['configurations']:
-    log4j_props.update(config['configurations']['yarn-log4j'])
-else:
-  log4j_props = None
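
One non-obvious point in the file above: format() is resource_management's helper,
not str.format. It interpolates {name} placeholders from the caller's scope, which
is why calls like format("/tmp/hadoop-{hdfs_user}") take no arguments. A rough
illustration of that behaviour (the shipped version also handles quoting and
missing names):

import inspect

def format(pattern):
  # Resolve {name} placeholders against the caller's globals, with the
  # caller's locals taking precedence.
  caller = inspect.currentframe().f_back
  names = dict(caller.f_globals)
  names.update(caller.f_locals)
  return pattern.format(**names)

hdfs_user = "hdfs"
assert format("/tmp/hadoop-{hdfs_user}") == "/tmp/hadoop-hdfs"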


[19/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
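
The metrics.json removed below is pure data: each key is the public Ambari metrics
path, and the value names the underlying sink metric (ganglia here) plus two
capability flags, pointInTime (current-value reads) and temporal (time-series
reads). A minimal sketch of reading such a mapping (illustrative, not the actual
metrics provider code):

import json

with open("metrics.json") as f:
  defs = json.load(f)

metrics = defs["HBASE_REGIONSERVER"]["Component"][0]["metrics"]
entry = metrics["metrics/load/load_one"]
if entry["temporal"]:
  print("fetch ganglia series for " + entry["metric"])
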
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json
deleted file mode 100644
index 37f73bf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json
+++ /dev/null
@@ -1,13635 +0,0 @@
-{
-  "HBASE_REGIONSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/hbase/regionserver/compactionTime_avg_time": {
-            "metric": "hbase.regionserver.compactionTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_num_ops": {
-            "metric": "rpc.rpc.closeRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "regionserver.Server.mutationsWithoutWALSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_num_ops": {
-            "metric": "rpc.rpc.unassign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_num_ops": {
-            "metric": "rpc.rpc.modifyTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion_avg_time": {
-            "metric": "rpc.rpc.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore_num_ops": {
-            "metric": "rpc.rpc.getClosestRowBefore_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowAppendCount": {
-            "metric": "regionserver.Server.slowAppendCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow_num_ops": {
-            "metric": "rpc.rpc.lockRow_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion_avg_time": {
-            "metric": "rpc.rpc.flushRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stopMaster_num_ops": {
-            "metric": "rpc.rpc.stopMaster_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balance_avg_time": {
-            "metric": "rpc.rpc.balance_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyColumn_avg_time": {
-            "metric": "rpc.rpc.modifyColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/rootIndexSizeKB": {
-            "metric": "hbase.regionserver.rootIndexSizeKB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper_num_ops": {
-            "metric": "rpc.rpc.getZooKeeper_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheCount": {
-            "metric": "regionserver.Server.blockCacheCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion_num_ops": {
-            "metric": "rpc.rpc.flushRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.putRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.getRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/get_num_ops": {
-            "metric": "rpc.rpc.get_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stopMaster_avg_time": {
-            "metric": "rpc.rpc.stopMaster_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/ping_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo_avg_time": {
-            "metric": "rpc.rpc.getRegionInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow_avg_time": {
-            "metric": "rpc.rpc.lockRow_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/commitPending_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME_num_ops": {
-            "metric": "rpc.rpc.checkOOME_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/reportRSFatalError_num_ops": {
-            "metric": "rpc.rpc.reportRSFatalError_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/reportRSFatalError_avg_time": {
-            "metric": "rpc.rpc.reportRSFatalError_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_min": {
-            "metric": "regionserver.Server.Delete_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClusterStatus_num_ops": {
-            "metric": "rpc.rpc.getClusterStatus_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHTableDescriptors_avg_time": {
-            "metric": "rpc.rpc.getHTableDescriptors_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.rpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteColumn_num_ops": {
-            "metric": "rpc.rpc.deleteColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment_num_ops": {
-            "metric": "rpc.rpc.increment_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyColumn_num_ops": {
-            "metric": "rpc.rpc.modifyColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME_avg_time": {
-            "metric": "rpc.rpc.checkOOME_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.next.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcSlowResponse_avg_time": {
-            "metric": "rpc.rpc.RpcSlowResponse_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration_avg_time": {
-            "metric": "rpc.rpc.getConfiguration_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_avg_time": {
-            "metric": "rpc.rpc.unassign_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/canCommit_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Delete_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion_avg_time": {
-            "metric": "rpc.rpc.compactRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/writeRequestsCount": {
-            "metric": "regionserver.Server.writeRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor_num_ops": {
-            "metric": "rpc.rpc.execCoprocessor_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/canCommit_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_min": {
-            "metric": "regionserver.Server.Get_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue_avg_time": {
-            "metric": "rpc.rpc.incrementColumnValue_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteTable_num_ops": {
-            "metric": "rpc.rpc.deleteTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Mutate_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitCount": {
-            "metric": "regionserver.Server.blockCacheHitCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/exists_avg_time": {
-            "metric": "rpc.rpc.exists_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowPutCount": {
-            "metric": "regionserver.Server.slowPutCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatency_num_ops": {
-            "metric": "hbase.regionserver.fsWriteLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete_num_ops": {
-            "metric": "rpc.rpc.delete_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists_num_ops": {
-            "metric": "rpc.rpc.exists_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerStartup_avg_time": {
-            "metric": "rpc.rpc.regionServerStartup_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete_num_ops": {
-            "metric": "rpc.rpc.checkAndDelete_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_avg_time": {
-            "metric": "rpc.rpc.closeRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature_avg_time": {
-            "metric": "rpc.rpc.getProtocolSignature_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/assign_avg_time": {
-            "metric": "rpc.rpc.assign_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionSize_num_ops": {
-            "metric": "hbase.regionserver.compactionSize_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close_avg_time": {
-            "metric": "rpc.rpc.close_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheSize": {
-            "metric": "regionserver.Server.blockCacheSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Mutate_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo_num_ops": {
-            "metric": "rpc.rpc.getHServerInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop_avg_time": {
-            "metric": "rpc.rpc.stop_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped_num_ops": {
-            "metric": "rpc.rpc.isStopped_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_median": {
-            "metric": "regionserver.Server.Mutate_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_median": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_median",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isMasterRunning_avg_time": {
-            "metric": "rpc.rpc.isMasterRunning_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue_num_ops": {
-            "metric": "rpc.rpc.incrementColumnValue_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.deleteRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/hdfsBlocksLocalityIndex": {
-            "metric": "hbase.regionserver.hdfsBlocksLocalityIndex",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/readRequestsCount": {
-            "metric": "regionserver.Server.readRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_min": {
-            "metric": "regionserver.Server.Mutate_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/storefileIndexSizeMB": {
-            "metric": "regionserver.Server.storeFileIndexSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/assign_num_ops": {
-            "metric": "rpc.rpc.assign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.close.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_median": {
-            "metric": "regionserver.Server.Delete_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/enableTable_avg_time": {
-            "metric": "rpc.rpc.enableTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_mean": {
-            "metric": "regionserver.Server.Mutate_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close_num_ops": {
-            "metric": "rpc.rpc.close_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/done_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.done_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionSize_avg_time": {
-            "metric": "hbase.regionserver.compactionSize_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_min": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_min",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteTable_avg_time": {
-            "metric": "rpc.rpc.deleteTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.put.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete_avg_time": {
-            "metric": "rpc.rpc.delete_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/statusUpdate_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.rpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClusterStatus_avg_time": {
-            "metric": "rpc.rpc.getClusterStatus_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.put.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_avg_time": {
-            "metric": "rpc.rpc.modifyTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut_avg_time": {
-            "metric": "rpc.rpc.checkAndPut_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put_avg_time": {
-            "metric": "rpc.rpc.put_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitRatio": {
-            "metric": "hbase.regionserver.blockCacheHitRatio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/createTable_avg_time": {
-            "metric": "rpc.rpc.createTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHTableDescriptors_num_ops": {
-            "metric": "rpc.rpc.getHTableDescriptors_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getAlterStatus_avg_time": {
-            "metric": "rpc.rpc.getAlterStatus_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo_num_ops": {
-            "metric": "rpc.rpc.getRegionInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/statusUpdate_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion_num_ops": {
-            "metric": "rpc.rpc.compactRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted_num_ops": {
-            "metric": "rpc.rpc.isAborted_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_max": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheEvictedCount": {
-            "metric": "regionserver.Server.blockCacheEvictionCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/disableTable_num_ops": {
-            "metric": "rpc.rpc.disableTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner_num_ops": {
-            "metric": "rpc.rpc.openScanner_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerReport_num_ops": {
-            "metric": "rpc.rpc.regionServerReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions_avg_time": {
-            "metric": "rpc.rpc.openRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Mutate_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isMasterRunning_num_ops": {
-            "metric": "rpc.rpc.isMasterRunning_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balanceSwitch_num_ops": {
-            "metric": "rpc.rpc.balanceSwitch_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/offline_num_ops": {
-            "metric": "rpc.rpc.offline_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_max": {
-            "metric": "regionserver.Server.Get_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort_num_ops": {
-            "metric": "rpc.rpc.abort_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitCachingRatio": {
-            "metric": "hbase.regionserver.blockCacheHitCachingRatio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter_num_ops": {
-            "metric": "rpc.rpc.rollHLogWriter_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions_num_ops": {
-            "metric": "rpc.rpc.openRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion_avg_time": {
-            "metric": "rpc.rpc.splitRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Get_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_min": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_min",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Delete_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_max": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getTask_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries_num_ops": {
-            "metric": "rpc.rpc.replicateLogEntries_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi_avg_time": {
-            "metric": "rpc.rpc.multi_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowIncrementCount": {
-            "metric": "regionserver.Server.slowIncrementCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Mutate_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionQueueSize": {
-            "metric": "regionserver.Server.compactionQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker_avg_time": {
-            "metric": "rpc.rpc.getCatalogTracker_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion_num_ops": {
-            "metric": "rpc.rpc.splitRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balance_num_ops": {
-            "metric": "rpc.rpc.balance_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushTime_num_ops": {
-            "metric": "hbase.regionserver.flushTime_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/shutdown_num_ops": {
-            "metric": "rpc.rpc.shutdown_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatency_num_ops": {
-            "metric": "hbase.regionserver.fsReadLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Get_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName_avg_time": {
-            "metric": "rpc.rpc.getServerName_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionTime_num_ops": {
-            "metric": "hbase.regionserver.compactionTime_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort_avg_time": {
-            "metric": "rpc.rpc.abort_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/enableTable_num_ops": {
-            "metric": "rpc.rpc.enableTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/stores": {
-            "metric": "regionserver.Server.storeCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/addColumn_avg_time": {
-            "metric": "rpc.rpc.addColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName_num_ops": {
-            "metric": "rpc.rpc.getServerName_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.rpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/disableTable_avg_time": {
-            "metric": "rpc.rpc.disableTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion_avg_time": {
-            "metric": "rpc.rpc.openRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerReport_avg_time": {
-            "metric": "rpc.rpc.regionServerReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getAlterStatus_num_ops": {
-            "metric": "rpc.rpc.getAlterStatus_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next_avg_time": {
-            "metric": "rpc.rpc.next_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Get_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles_num_ops": {
-            "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/ping_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatency_avg_time": {
-            "metric": "hbase.regionserver.fsReadLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushSize_num_ops": {
-            "metric": "hbase.regionserver.flushSize_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balanceSwitch_avg_time": {
-            "metric": "rpc.rpc.balanceSwitch_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_max": {
-            "metric": "regionserver.Server.Mutate_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.callQueueLen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion_num_ops": {
-            "metric": "rpc.rpc.openRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsSyncLatency_num_ops": {
-            "metric": "hbase.regionserver.fsSyncLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.getOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/move_num_ops": {
-            "metric": "rpc.rpc.move_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop_num_ops": {
-            "metric": "rpc.rpc.stop_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries_avg_time": {
-            "metric": "rpc.rpc.replicateLogEntries_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_mean": {
-            "metric": "regionserver.Server.Get_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/get_avg_time": {
-            "metric": "rpc.rpc.get_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi_num_ops": {
-            "metric": "rpc.rpc.multi_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.next.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.addToOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteColumn_avg_time": {
-            "metric": "rpc.rpc.deleteColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/regions": {
-            "metric": "regionserver.Server.regionCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles_avg_time": {
-            "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.addToOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheFree": {
-            "metric": "regionserver.Server.blockCacheFreeSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/offline_avg_time": {
-            "metric": "rpc.rpc.offline_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow_avg_time": {
-            "metric": "rpc.rpc.unlockRow_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheMissCount": {
-            "metric": "regionserver.Server.blockCacheMissCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker_num_ops": {
-            "metric": "rpc.rpc.getCatalogTracker_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushQueueSize": {
-            "metric": "regionserver.Server.flushQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.close.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor_avg_time": {
-            "metric": "rpc.rpc.execCoprocessor_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_mean",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/createTable_num_ops": {
-            "metric": "rpc.rpc.createTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration_num_ops": {
-            "metric": "rpc.rpc.getConfiguration_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped_avg_time": {
-            "metric": "rpc.rpc.isStopped_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter_avg_time": {
-            "metric": "rpc.rpc.rollHLogWriter_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsSyncLatency_avg_time": {
-            "metric": "hbase.regionserver.fsSyncLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-            "metric": "regionserver.Server.Delete_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_mean": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_mean",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-            "metric": "regionserver.Server.staticIndexSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-            "metric": "regionserver.Server.mutationsWithoutWALCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/get/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.get.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_median": {
-            "metric": "regionserver.Server.Get_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner_avg_time": {
-            "metric": "rpc.rpc.openScanner_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcSlowResponse_num_ops": {
-            "metric": "rpc.rpc.RpcSlowResponse_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted_avg_time": {
-            "metric": "rpc.rpc.isAborted_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushSize_avg_time": {
-            "metric": "hbase.regionserver.flushSize_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/commitPending_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore_avg_time": {
-            "metric": "rpc.rpc.getClosestRowBefore_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_max": {
-            "metric": "regionserver.Server.Delete_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/get/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.get.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put_num_ops": {
-            "metric": "rpc.rpc.put_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/move_avg_time": {
-            "metric": "rpc.rpc.move_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/percentFilesLocal": {
-            "metric": "regionserver.Server.percentFilesLocal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatency_avg_time": {
-            "metric": "hbase.regionserver.fsWriteLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getTask_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addColumn_num_ops": {
-            "metric": "rpc.rpc.addColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/maxMemoryM": {
-            "metric": "jvm.metrics.maxMemoryM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.getOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushTime_avg_time": {
-            "metric": "hbase.regionserver.flushTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/done_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.done_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion_num_ops": {
-            "metric": "rpc.rpc.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow_num_ops": {
-            "metric": "rpc.rpc.unlockRow_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowGetCount": {
-            "metric": "regionserver.Server.slowGetCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/shutdown_avg_time": {
-            "metric": "rpc.rpc.shutdown_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerStartup_num_ops": {
-            "metric": "rpc.rpc.regionServerStartup_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/requests": {
-            "metric": "regionserver.Server.totalRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/storefiles": {
-            "metric": "regionserver.Server.storeFileCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/next_num_ops": {
-            "metric": "rpc.rpc.next_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowDeleteCount": {
-            "metric": "regionserver.Server.slowDeleteCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete_avg_time": {
-            "metric": "rpc.rpc.checkAndDelete_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo_avg_time": {
-            "metric": "rpc.rpc.getHServerInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper_avg_time": {
-            "metric": "rpc.rpc.getZooKeeper_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/hlogFileCount": {
-            "metric": "hbase.regionserver.hlogFileCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Get_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Delete_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/memstoreSizeMB": {
-            "metric": "regionserver.Server.memStoreSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_median": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_median",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature_num_ops": {
-            "metric": "rpc.rpc.getProtocolSignature_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Delete_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.rpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
-            "metric": "regionserver.Server.staticBloomSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut_num_ops": {
-            "metric": "rpc.rpc.checkAndPut_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment_avg_time": {
-            "metric": "rpc.rpc.increment_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/hbase/regionserver/slowPutCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/percentFilesLocal": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheFree": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheMissCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/flushQueueSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowAppendCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowIncrementCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheEvictedCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/compactionQueueSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowGetCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/readRequestsCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/storefileIndexSizeMB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/requests": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/storefiles": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/writeRequestsCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowDeleteCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/stores": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/memstoreSizeMB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/regions": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheHitCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/hbase/regionserver/compactionTime_avg_time": {
-            "metric": "hbase.regionserver.compactionTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_num_ops": {
-            "metric": "rpc.rpc.closeRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "regionserver.Server.mutationsWithoutWALSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_num_ops": {
-            "metric": "rpc.rpc.unassign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_num_ops": {
-            "metric": "rpc.rpc.modifyTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
-            "pointInTime":

<TRUNCATED>
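
The JSON removed above is the HBase RegionServer metrics mapping: each Ambari-facing metric path is bound to a backing source name, with "pointInTime" marking whether a current value can be served and "temporal" whether a time series can. The "ganglia" section maps to sink metric names (a mix of the old hbase.regionserver.* names and the newer regionserver.Server.* ones), while the "jmx" section maps to MBean attributes and is point-in-time only. A minimal Python sketch of how such a definition could be resolved; the function and sample entries below are illustrative, not Ambari's actual resolver:

# Illustrative resolver for the metric-definition shape deleted above;
# the two sample entries are copied from the removed JSON.

METRIC_DEFS = {
    "metrics/hbase/regionserver/compactionQueueSize": {
        "metric": "regionserver.Server.compactionQueueLength",
        "pointInTime": False,
        "temporal": True,
    },
    "metrics/jvm/gcCount": {
        "metric": "jvm.JvmMetrics.GcCount",
        "pointInTime": True,
        "temporal": True,
    },
}

def resolve(path, temporal):
    """Return the backing source metric for an Ambari metric path, or
    None when the definition cannot serve the requested query type."""
    entry = METRIC_DEFS.get(path)
    if entry is None:
        return None
    flag = "temporal" if temporal else "pointInTime"
    return entry["metric"] if entry.get(flag) else None

print(resolve("metrics/hbase/regionserver/compactionQueueSize", temporal=True))
# regionserver.Server.compactionQueueLength
print(resolve("metrics/hbase/regionserver/compactionQueueSize", temporal=False))
# None: this entry is temporal-only in the ganglia section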

[14/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';
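
The deleted pigSmoke.sh is a three-line Pig Latin smoke test: load a passwd-style file, project the first field, and store the result as pigsmoke.out. A hedged sketch of how such a check might be driven end to end, assuming the standard pig and hadoop CLI entry points; the paths are illustrative:

import subprocess

def run_pig_smoke(script="/tmp/pigSmoke.sh", output="pigsmoke.out"):
    # Execute the Pig Latin script; check=True raises on a non-zero exit.
    subprocess.run(["pig", script], check=True)
    # 'hadoop fs -test -e' exits 0 only if the output path exists.
    subprocess.run(["hadoop", "fs", "-test", "-e", output], check=True)

if __name__ == "__main__":
    run_pig_smoke()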

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh
deleted file mode 100644
index fa90c2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
-echo $!|cat>$3
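
The wrapper above takes four positional arguments: $1 the stdout log, $2 the stderr log, $3 the file that receives the daemon's PID, and $4 the configuration directory exported as HIVE_CONF_DIR. A minimal sketch of invoking it with that convention (all paths are illustrative assumptions):

import subprocess

subprocess.run(
    ["/bin/sh", "/tmp/startHiveserver2.sh",
     "/var/log/hive/hiveserver2.out",   # $1: stdout log
     "/var/log/hive/hiveserver2.err",   # $2: stderr log
     "/var/run/hive/hiveserver2.pid",   # $3: pid file
     "/etc/hive/conf"],                 # $4: HIVE_CONF_DIR
    check=True,
)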

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh
deleted file mode 100644
index 9350776..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
-echo $!|cat>$3
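
startMetastore.sh follows the same $1..$4 convention, and both wrappers record the daemon's PID in the file passed as $3, so a caller can test liveness without parsing ps output. A plain-Python sketch of that check; the pid-file path is an illustrative assumption:

import os

def is_running(pid_file="/var/run/hive/hive-metastore.pid"):
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except (IOError, ValueError):
        return False          # no pid file, or unreadable contents
    try:
        os.kill(pid, 0)       # signal 0 probes existence without killing
        return True
    except OSError:
        return False          # stale pid file: no such process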

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py
deleted file mode 100644
index 2993d3a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hcat():
-  import params
-
-  Directory(params.hcat_conf_dir,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-  Directory(params.hcat_pid_dir,
-            owner=params.webhcat_user,
-            recursive=True
-  )
-
-  hcat_TemplateConfig('hcat-env.sh')
-
-
-def hcat_TemplateConfig(name):
-  import params
-
-  TemplateConfig(format("{hcat_conf_dir}/{name}"),
-                 owner=params.hcat_user,
-                 group=params.user_group
-  )
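
hcat.py above is written against the resource_management DSL: each Directory or TemplateConfig call declares desired state (path, owner, group, mode) that the framework converges. Imperatively, Directory(path, owner=..., group=..., recursive=True) amounts to roughly the following; a plain-Python sketch, not the library's implementation:

import grp
import os
import pwd

def ensure_directory(path, owner, group, recursive=False):
    # Create the directory (mkdir -p when recursive), then set ownership.
    if recursive:
        os.makedirs(path, exist_ok=True)
    elif not os.path.isdir(path):
        os.mkdir(path)
    os.chown(path, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)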

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py
deleted file mode 100644
index 8b5921a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hcat import hcat
-
-class HCatClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    hcat()
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HCatClient().execute()
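
The client script shows the lifecycle contract: install() installs packages and then delegates to configure(), while status() raises ClientComponentHasNoStatus because a client has no daemon to probe. A toy stand-in for the command-dispatch pattern behind Script().execute(); the real resource_management.Script does considerably more:

import sys

class Script(object):
    def execute(self):
        # Dispatch the first CLI argument to the method of the same name,
        # e.g. 'install', 'configure', 'status'.
        command = sys.argv[1] if len(sys.argv) > 1 else "status"
        getattr(self, command)()

class HCatClientSketch(Script):
    def install(self):
        print("install packages")
        self.configure()
    def configure(self):
        print("write hcat configuration")
    def status(self):
        raise RuntimeError("client components have no daemon to report on")

if __name__ == "__main__":
    HCatClientSketch().execute()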

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py
deleted file mode 100644
index bc75e44..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import get_unique_id_and_date
-
-def hcat_service_check():
-    import params
-    unique = get_unique_id_and_date()
-    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
-    test_cmd = format("fs -test -e {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-    else:
-      kinit_cmd = ""
-
-    File('/tmp/hcatSmoke.sh',
-         content=StaticFile("hcatSmoke.sh"),
-         mode=0755
-    )
-
-    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
-
-    Execute(prepare_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True)
-
-    ExecuteHadoop(test_cmd,
-                  user=params.hdfs_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir)
-
-    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
-
-    Execute(cleanup_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True
-    )
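
A note on format() as used throughout these scripts: it is resource_management's
helper, not str.format, and it resolves placeholders such as {kinit_cmd} and
{unique} from the calling scope, which is why hcat_service_check() never passes
them explicitly. A rough standard-library stand-in, assuming that lookup order
(the helper name below is ours):

    import inspect
    import string

    def format_from_caller(pattern):
        # Resolve each {name} from the calling frame's locals first, then its
        # globals, mimicking how resource_management's format() behaves here.
        frame = inspect.currentframe().f_back
        names = dict(frame.f_globals)
        names.update(frame.f_locals)
        return string.Formatter().vformat(pattern, (), names)

    def demo():
        unique = "1392"
        return format_from_caller("/apps/hive/warehouse/hcatsmoke{unique}")

    print(demo())  # -> /apps/hive/warehouse/hcatsmoke1392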

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py
deleted file mode 100644
index 44e6a77..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-
-def hive(name=None):
-  import params
-
-  if name in ('metastore', 'hiveserver2'):
-    hive_config_dir = params.hive_server_conf_dir
-    config_file_mode = 0600
-    jdbc_connector()
-  else:
-    hive_config_dir = params.hive_conf_dir
-    config_file_mode = 0644
-
-  Directory(hive_config_dir,
-            owner=params.hive_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  XmlConfig("hive-site.xml",
-            conf_dir=hive_config_dir,
-            configurations=params.config['configurations']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=config_file_mode
-  )
-
-  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
-               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
-
-  Execute(cmd,
-          not_if=format("[ -f {check_db_connection_jar_name}]"))
-
-  if name == 'metastore':
-    File(params.start_metastore_path,
-         mode=0755,
-         content=StaticFile('startMetastore.sh')
-    )
-
-  elif name == 'hiveserver2':
-    File(params.start_hiveserver2_path,
-         mode=0755,
-         content=StaticFile('startHiveserver2.sh')
-    )
-
-  if name != "client":
-    crt_directory(params.hive_pid_dir)
-    crt_directory(params.hive_log_dir)
-    crt_directory(params.hive_var_lib)
-
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
-  )
-
-  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
-  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
-
-  log4j_exec_filename = 'hive-exec-log4j.properties'
-  if params.log4j_exec_props is not None:
-    PropertiesFile(log4j_exec_filename,
-                   dir=params.hive_conf_dir,
-                   properties=params.log4j_exec_props,
-                   mode=0664,
-                   owner=params.hive_user,
-                   group=params.user_group
-    )
-  elif (os.path.exists("{params.hive_conf_dir}/{log4j_exec_filename}.template")):
-    File(format("{params.hive_conf_dir}/{log4j_exec_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{params.hive_conf_dir}/{log4j_exec_filename}.template"))
-    )
-
-  log4j_filename = 'hive-log4j.properties'
-  if params.log4j_props is not None:
-    PropertiesFile(log4j_filename,
-                   dir=params.hive_conf_dir,
-                   properties=params.log4j_props,
-                   mode=0664,
-                   owner=params.hive_user,
-                   group=params.user_group
-    )
-  elif (os.path.exists("{params.hive_conf_dir}/{log4j_filename}.template")):
-    File(format("{params.hive_conf_dir}/{log4j_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{params.hive_conf_dir}/{log4j_filename}.template"))
-    )
-
-
-def crt_directory(name):
-  import params
-
-  Directory(name,
-            recursive=True,
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-
-
-def crt_file(name):
-  import params
-
-  File(name,
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-
-def jdbc_connector():
-  import params
-
-  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
-    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            path=["/bin", "usr/bin/"])
-
-  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-    cmd = format(
-      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
-      "cp {driver_curl_target} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py
deleted file mode 100644
index 0a5fb2b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import sys
-from resource_management import *
-
-from hive import hive
-
-class HiveClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='client')
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py
deleted file mode 100644
index c741174..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveMetastore(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='metastore')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'metastore',
-                   action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'metastore',
-                   action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    # Check the metastore pid file to verify that the process is running
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py
deleted file mode 100644
index 3ad81a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveServer(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='hiveserver2')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'hiveserver2',
-                  action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'hiveserver2',
-                  action = 'stop'
-    )
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    # Check the HiveServer2 pid file to verify that the process is running
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py
deleted file mode 100644
index e8d4e5c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hive_service(
-    name,
-    action='start'):
-
-  import params
-
-  if name == 'metastore':
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    cmd = format(
-      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
-  elif name == 'hiveserver2':
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    cmd = format(
-      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
-
-  if action == 'start':
-    demon_cmd = format("{cmd}")
-    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            user=params.hive_user,
-            not_if=no_op_test
-    )
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format(
-        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
-      Execute(db_connection_check_command,
-              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
-
-  elif action == 'stop':
-    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
-    Execute(demon_cmd)
-
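
The no_op_test above is the standard pid-file liveness guard: start is skipped
when the pid file exists and the recorded process is still alive. An equivalent
check in plain Python, for illustration only (the function name is ours):

    import os
    import subprocess

    def process_alive(pid_file):
        # ls {pid_file} && ps `cat {pid_file}`, expressed without a shell
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False
        with open(os.devnull, "w") as devnull:
            return subprocess.call(["ps", "-p", str(pid)], stdout=devnull) == 0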

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py
deleted file mode 100644
index 8567311..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from mysql_service import mysql_service
-
-class MysqlServer(Script):
-
-  if System.get_instance().os_family == "suse":
-    daemon_name = 'mysql'
-  else:
-    daemon_name = 'mysqld'
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action='start')
-
-    File(params.mysql_adduser_path,
-         mode=0755,
-         content=StaticFile('addMysqlUser.sh')
-    )
-
-    # A tuple command runs without a shell, so the password needs no escaping
-    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
-           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd), params.mysql_host[0])
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-    mysql_service(daemon_name=self.daemon_name, action='stop')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'stop')
-
-  def status(self, env):
-    mysql_service(daemon_name=self.daemon_name, action = 'status')
-
-if __name__ == "__main__":
-  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py
deleted file mode 100644
index cfb3e08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def mysql_service(daemon_name=None, action='start'):
-  cmd = format('service {daemon_name} {action}')
-
-  if action == 'status':
-    logoutput = False
-  else:
-    logoutput = True
-
-  Execute(cmd,
-          path="/usr/local/bin/:/bin/:/sbin/",
-          tries=1,
-          logoutput=logoutput)
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py
deleted file mode 100644
index 734d3ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_server_conf_dir = "/etc/hive/conf.server"
-hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
-
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-
-#users
-hive_user = config['configurations']['global']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
-#JDBC driver jar name
-hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
-if hive_jdbc_driver == "com.mysql.jdbc.Driver":
-  jdbc_jar_name = "mysql-connector-java.jar"
-elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-  jdbc_jar_name = "ojdbc6.jar"
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-#common
-hive_metastore_port = config['configurations']['global']['hive_metastore_port']
-hive_var_lib = '/var/lib/hive'
-hive_server_host = config['clusterHostInfo']['hive_server_host']
-hive_url = format("jdbc:hive2://{hive_server_host}:10000")
-
-smokeuser = config['configurations']['global']['smokeuser']
-smoke_test_sql = "/tmp/hiveserver2.sql"
-smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-
-#hive_env
-hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
-hive_pid_dir = status_params.hive_pid_dir
-hive_pid = status_params.hive_pid
-
-#hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
-
-#Starting hiveserver2
-start_hiveserver2_script = 'startHiveserver2.sh'
-
-hadoop_home = '/usr'
-
-##Starting metastore
-start_metastore_script = 'startMetastore.sh'
-hive_metastore_pid = status_params.hive_metastore_pid
-java_share_dir = '/usr/share/java'
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
-
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
-artifact_dir = "/tmp/HDP-artifacts/"
-
-target = format("{hive_lib}/{jdbc_jar_name}")
-
-jdk_location = config['hostLevelParams']['jdk_location']
-driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
-
-start_hiveserver2_path = "/tmp/start_hiveserver2_script"
-start_metastore_path = "/tmp/start_metastore_script"
-
-hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-java64_home = config['hostLevelParams']['java_home']
-
-##### MYSQL
-
-db_name = config['configurations']['global']['hive_database_name']
-mysql_user = "mysql"
-mysql_group = 'mysql'
-mysql_host = config['clusterHostInfo']['hive_mysql_host']
-
-mysql_adduser_path = "/tmp/addMysqlUser.sh"
-
-########## HCAT
-
-hcat_conf_dir = '/etc/hcatalog/conf'
-
-metastore_port = 9933
-hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-
-hcat_dbroot = hcat_lib
-
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
-
-hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']
-
-hadoop_conf_dir = '/etc/hadoop/conf'
-
-#hive-log4j.properties.template
-if ('hive-log4j' in config['configurations']):
-  log4j_props = config['configurations']['hive-log4j']
-else:
-  log4j_props = None
-
-#hive-exec-log4j.properties.template
-if ('hive-exec-log4j' in config['configurations']):
-  log4j_exec_props = config['configurations']['hive-exec-log4j']
-else:
-  log4j_exec_props = None
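
In params.py, default('hive_jdbc_driver', 'com.mysql.jdbc.Driver') falls back
to its second argument when the key is absent from the command's configuration
JSON. A dictionary-based sketch of that lookup, assuming default() is a plain
missing-key fallback:

    command_json = {"hive_jdbc_driver": "oracle.jdbc.driver.OracleDriver"}

    def default(key, fallback):
        # Assumed behavior: configured value if present, else the fallback.
        return command_json.get(key, fallback)

    print(default("hive_jdbc_driver", "com.mysql.jdbc.Driver"))
    # -> oracle.jdbc.driver.OracleDriver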

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py
deleted file mode 100644
index 111e8a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-from hcat_service_check import hcat_service_check
-
-class HiveServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
-      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
-      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
-    else:
-      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
-
-    File(params.smoke_test_path,
-         content=StaticFile('hiveserver2Smoke.sh'),
-         mode=0755
-    )
-
-    File(params.smoke_test_sql,
-         content=StaticFile('hiveserver2.sql')
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True,
-            user=params.smokeuser)
-
-    hcat_service_check()
-
-if __name__ == "__main__":
-  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py
deleted file mode 100644
index 7770975..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
-hive_pid = 'hive-server.pid'
-
-hive_metastore_pid = 'hive.pid'
-
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2
deleted file mode 100644
index 2a35240..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HCAT_PID_DIR={{hcat_pid_dir}}/
-HCAT_LOG_DIR={{hcat_log_dir}}/
-HCAT_CONF_DIR={{hcat_conf_dir}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT={{hcat_dbroot}}
-USER={{hcat_user}}
-METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2
deleted file mode 100644
index 548262a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the JVM started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-# export HIVE_AUX_JARS_PATH=
-export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}
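
The {{...}} placeholders in these .j2 files are Jinja2 variables filled from
params when the template is rendered onto disk. A minimal rendering sketch
(the value is made up):

    from jinja2 import Template

    line = Template('export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"')
    print(line.render(hadoop_heapsize=1024))
    # -> export HADOOP_HEAPSIZE="1024"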

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index a7396b9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,113 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-            <commandScript>
-              <script>scripts/nagios_server.py</script>
-              <scriptType>PYTHON</scriptType>
-              <timeout>600</timeout>
-            </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>perl</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>fping</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php5-json</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>redhat5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>oraclelinux5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index f4063fb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,243 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>
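
For a concrete reading of check_aggregate.php: with total=10, affected=2,
-w 10 and -c 30, the affected percentage is 20, which clears the critical bar
but trips the warning. The same decision in Python, for illustration:

    def nagios_state(total, affected, warn_pct, crit_pct):
        # 0=OK, 1=WARNING, 2=CRITICAL, matching the plugin's exit codes
        percent = 0 if total == 0 else affected * 100.0 / total
        if percent >= crit_pct:
            return 2
        if percent >= warn_pct:
            return 1
        return 0

    assert nagios_state(10, 2, 10, 30) == 1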

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks the storage capacity remaining on the local datanode storage
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }  
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 19347b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the master node, gets the jmx-json document,
- * and checks whether the corrupt or missing blocks % is > threshold.
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $c_percent = 0;
-    $object = $json_array['beans'][0];
-    $missing_blocks = $object['MissingBlocks'];
-    $corrupt_blocks = $object['CorruptBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    if($total_blocks == 0) {
-      $m_percent = 0;
-      $c_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      $c_percent = ($corrupt_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
-             ">, missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
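
All of these PHP checks share one fetch pattern: GET the /jmx servlet with a
qry filter, decode the JSON, and read beans[0]. For orientation, here is a
minimal Python 3 sketch of that shared step; Kerberos/SPNEGO, SSL, and the
401 retry are omitted, and the host name is purely illustrative:

import json
from urllib.request import urlopen

def fetch_jmx_bean(host, port, qry):
    # Same endpoint the PHP plugins hit: /jmx?qry=<bean name pattern>
    url = "http://%s:%d/jmx?qry=%s" % (host, port, qry)
    with urlopen(url, timeout=10) as resp:
        beans = json.load(resp)["beans"]
    if not beans:
        raise RuntimeError("no bean matched %s" % qry)
    return beans[0]

# e.g. the block counters used by check_hdfs_blocks above:
bean = fetch_jmx_bean("nn.example.com", 50070,
                      "Hadoop:service=NameNode,name=FSNamesystem")
print(bean["MissingBlocks"], bean["CorruptBlocks"], bean["BlocksTotal"])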

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * check the % HDFS capacity used >= warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 15c85eb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to namenode, get the jmx-json document
- * check the NameDirStatuses to find any offline (failed) directories
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 50b075a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2


[26/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php
new file mode 100644
index 0000000..af72723
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hdfs_capacity.php
@@ -0,0 +1,109 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the master node, gets the jmx-json document,
+ * and checks whether the % HDFS capacity used >= the warn and critical limits.
+ * check_jmx -H hostaddress -p port -w 1 -c 1
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $percent = 0;
+    $object = $json_array['beans'][0];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }
+    $CapacityUsed = $object['CapacityUsed'];
+    $CapacityRemaining = $object['CapacityRemaining'];
+    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
+    if($CapacityTotal == 0) {
+      $percent = 0;
+    } else {
+      $percent = ($CapacityUsed/$CapacityTotal)*100;
+      break;
+    }
+  }
+  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
+             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
+
+  if ($percent >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>
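
The threshold handling at the bottom of this plugin is the standard Nagios
contract: exit 0 for OK, 1 for WARNING, 2 for CRITICAL, 3 for UNKNOWN. A
rough Python 3 sketch of the capacity math and that contract, with an
illustrative host and thresholds and no Kerberos/SSL handling:

import json
import sys
from urllib.request import urlopen

HOST, PORT = "nn.example.com", 50070   # illustrative
WARN, CRIT = 80.0, 90.0                # percent, from -w/-c in the plugin

url = "http://%s:%d/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState" % (HOST, PORT)
bean = json.load(urlopen(url))["beans"][0]
used = bean["CapacityUsed"]
total = used + bean["CapacityRemaining"]
percent = used / total * 100.0 if total else 0.0

gb = 1024.0 ** 3
msg = "DFSUsedGB:<%.1f>, DFSTotalGB:<%.1f>" % (used / gb, total / gb)
if percent >= CRIT:
    print("CRITICAL: " + msg)
    sys.exit(2)    # Nagios CRITICAL
if percent >= WARN:
    print("WARNING: " + msg)
    sys.exit(1)    # Nagios WARNING
print("OK: " + msg)
sys.exit(0)        # Nagios OK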

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh
new file mode 100644
index 0000000..640c077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hive_metastore_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#The uri is of the form thrift://<hostname>:<port>
+HOST=$1
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
+export JAVA_HOME=$JAVA_HOME
+out=`hcat $HCAT_URL -e "show databases" 2>&1`
+if [[ "$?" -ne 0 ]]; then
+  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
+  exit 2;
+fi
+echo "OK: Hive Metastore status OK";
+exit 0;
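
The probe above is simply "run a cheap hcat query and map its exit status to
a Nagios state". A Python 3 sketch of the non-secure path, assuming hcat is
on PATH and with an illustrative host and port:

import subprocess
import sys

HOST, PORT = "metastore.example.com", 9083   # illustrative
cmd = ["hcat", "-Dhive.metastore.uris=thrift://%s:%d" % (HOST, PORT),
       "-e", "show databases"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE,
                      stderr=subprocess.STDOUT, universal_newlines=True)
if proc.returncode != 0:
    print("CRITICAL: Error accessing Hive Metastore status [%s]" % proc.stdout.strip())
    sys.exit(2)
print("OK: Hive Metastore status OK")
sys.exit(0)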

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh
new file mode 100644
index 0000000..076d9b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_hue_status.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+status=`/etc/init.d/hue status 2>&1`
+
+if [[ "$?" -ne 0 ]]; then
+	echo "WARNING: Hue is stopped";
+	exit 1;
+fi
+
+echo "OK: Hue is running";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
new file mode 100644
index 0000000..15c85eb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+MAPRED_LOCAL_DIRS=$1
+CRITICAL=`echo $2 | cut -d % -f 1`
+IFS=","
+for mapred_dir in $MAPRED_LOCAL_DIRS
+do
+  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
+  if [ $percent -ge $CRITICAL ]; then
+    echo "CRITICAL: MapReduce local dir is full."
+    exit 2
+  fi
+done
+echo "OK: MapReduce local dir space is available."
+exit 0
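
The same check works without shelling out to df. A Python 3 sketch using
shutil.disk_usage, with an illustrative directory list and threshold; note
that df's Use% accounts for reserved blocks slightly differently:

import shutil
import sys

MAPRED_LOCAL_DIRS = "/hadoop/mapred1,/hadoop/mapred2"  # illustrative, arg 1
CRITICAL = 90                                          # percent, arg 2

for mapred_dir in MAPRED_LOCAL_DIRS.split(","):
    usage = shutil.disk_usage(mapred_dir)
    percent = usage.used * 100.0 / usage.total  # close to df's Use%
    if percent >= CRITICAL:
        print("CRITICAL: MapReduce local dir is full.")
        sys.exit(2)
print("OK: MapReduce local dir space is available.")
sys.exit(0)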

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php
new file mode 100644
index 0000000..186166d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_name_dir_status.php
@@ -0,0 +1,93 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the namenode, gets the jmx-json document,
+ * and checks NameDirStatuses for any offline (failed) directories.
+ * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
+ */
+ 
+  include "hdp_nagios_init.php";
+
+  $options = getopt("h:p:e:k:r:t:s:");
+  //Check only for mandatory options
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+  
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if ($object['NameDirStatuses'] == "") {
+    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
+    exit(1);
+  }
+  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
+  $failed_dir_count = count($NameDirStatuses['failed']);
+  $out_msg = "CRITICAL: Offline NameNode directories: ";
+  if ($failed_dir_count > 0) {
+    foreach ($NameDirStatuses['failed'] as $key => $value) {
+      $out_msg = $out_msg . $key . ":" . $value . ", ";
+    }
+    echo $out_msg . "\n";
+    exit (2);
+  }
+  echo "OK: All NameNode directories are active" . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
+  }
+?>
\ No newline at end of file
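
One subtlety in this plugin: NameDirStatuses is itself a JSON string embedded
inside the JMX JSON document, hence the second json_decode. The equivalent
double decode in Python 3, with an illustrative host and Kerberos omitted:

import json
import sys
from urllib.request import urlopen

HOST, PORT = "nn.example.com", 50070  # illustrative
url = "http://%s:%d/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo" % (HOST, PORT)
bean = json.load(urlopen(url))["beans"][0]
statuses = json.loads(bean["NameDirStatuses"])  # second decode: value is a JSON string
failed = statuses.get("failed", {})
if failed:
    print("CRITICAL: Offline NameNode directories: " +
          ", ".join("%s:%s" % kv for kv in failed.items()))
    sys.exit(2)
print("OK: All NameNode directories are active")
sys.exit(0)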

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh
new file mode 100644
index 0000000..50b075a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_namenodes_ha.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+IFS=',' read -a namenodes <<< "$1"
+port=$2
+totalNN=${#namenodes[@]}
+activeNN=()
+standbyNN=()
+unavailableNN=()
+
+for nn in "${namenodes[@]}"
+do
+  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
+  if [ "$status" == "active" ]; then
+    activeNN[${#activeNN[*]}]="$nn"
+  elif [ "$status" == "standby" ]; then
+    standbyNN[${#standbyNN[*]}]="$nn"
+  elif [ "$status" == "" ]; then
+    unavailableNN[${#unavailableNN[*]}]="$nn"
+  fi
+done
+
+message=""
+critical=false
+
+if [ ${#activeNN[@]} -gt 1 ]; then
+  critical=true
+  message=$message" Only one NN can have HAState=active;"
+elif [ ${#activeNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Active NN available;"
+elif [ ${#standbyNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Standby NN available;"
+fi
+
+NNstats=" Active<"
+for nn in "${activeNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Standby<"
+for nn in "${standbyNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Unavailable<"
+for nn in "${unavailableNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">"
+
+if [ $critical == false ]; then
+  echo "OK: NameNode HA healthy;"$NNstats
+  exit 0
+fi
+
+echo "CRITICAL:"$message$NNstats
+exit 2
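
This HA check classifies every NameNode by the tag.HAState attribute of its
FSNamesystem bean and requires exactly one active plus at least one standby.
A compact Python 3 sketch of that classification, with illustrative host
names and simplified message formatting:

import json
import sys
from urllib.request import urlopen

NAMENODES = ["nn1.example.com", "nn2.example.com"]  # illustrative, from arg 1
PORT = 50070

states = {}
for nn in NAMENODES:
    try:
        url = "http://%s:%d/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem" % (nn, PORT)
        beans = json.load(urlopen(url, timeout=5))["beans"]
        states[nn] = beans[0].get("tag.HAState", "") if beans else ""
    except OSError:
        states[nn] = ""  # unreachable -> counts as unavailable

active = [nn for nn, s in states.items() if s == "active"]
standby = [nn for nn, s in states.items() if s == "standby"]
if len(active) == 1 and standby:
    print("OK: NameNode HA healthy; Active<%s>, Standby<%s>"
          % (active[0], ";".join(standby)))
    sys.exit(0)
print("CRITICAL: active=%s, standby=%s" % (active, standby))
sys.exit(2)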

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh
new file mode 100644
index 0000000..020b41d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_nodemanager_health.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HOST=$1
+PORT=$2
+NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
+SEC_ENABLED=$3
+export PATH="/usr/bin:$PATH"
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$4
+  NAGIOS_USER=$5
+  KINIT_PATH=$6
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+
+RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
+if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
+  echo "OK: NodeManager healthy";
+  exit 0;
+fi
+echo "CRITICAL: NodeManager unhealthy";
+exit 2;
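
Rather than substring-matching the raw response as the shell script does, the
NodeManager REST document can be parsed directly. A Python 3 sketch of the
non-secure path; 8042 is the usual NM web port, the host is illustrative,
and error handling is omitted:

import json
import sys
from urllib.request import urlopen

HOST, PORT = "nm.example.com", 8042  # illustrative
info = json.load(urlopen("http://%s:%d/ws/v1/node/info" % (HOST, PORT)))
if info.get("nodeInfo", {}).get("nodeHealthy") is True:
    print("OK: NodeManager healthy")
    sys.exit(0)
print("CRITICAL: NodeManager unhealthy")
sys.exit(2)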

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh
new file mode 100644
index 0000000..820ee99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_oozie_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# OOZIE_URL is of the form http://<hostname>:<port>/oozie
+HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+OOZIE_URL="http://$HOST:$PORT/oozie"
+export JAVA_HOME=$JAVA_HOME
+out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing Oozie Server status [$out]";
+  exit 2;
+fi
+echo "OK: Oozie Server status [$out]";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php
new file mode 100644
index 0000000..463f69b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_rpcq_latency.php
@@ -0,0 +1,104 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the master node, gets the jmx-json document,
+ * and checks the RPC wait time in the queue, RpcQueueTime_avg_time.
+ * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
+ * Warning and Critical values are in seconds
+ * Service Name = JobTracker, NameNode, JobHistoryServer
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $master=$options['n'];
+  $warn=$options['w'];
+  $crit=$options['c'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  } 
+  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
+  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
+
+  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
+             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
+             "> Secs";
+
+  if ($RpcQueueTime_avg_time >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($RpcQueueTime_avg_time >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh
new file mode 100644
index 0000000..7fbc4c4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_templeton_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# out='{"status":"ok","version":"v1"}<status_code:200>'
+HOST=$1
+PORT=$2
+VERSION=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then 
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+regex="^.*\"status\":\"ok\".*<status_code:200>$"
+out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
+if [[ $out =~ $regex ]]; then
+  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
+  echo "OK: WebHCat Server status [$out]";
+  exit 0;
+fi
+echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
+exit 2;
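
The regex above matches the JSON body and the appended <status_code:...>
marker in one pass. Parsed directly, the same check looks like this in
Python 3; urlopen raises on non-2xx responses, which covers the
status_code:200 half, and the host is illustrative with Kerberos and error
handling omitted:

import json
import sys
from urllib.request import urlopen

HOST, PORT, VERSION = "webhcat.example.com", 50111, "v1"  # illustrative
body = json.load(urlopen("http://%s:%d/templeton/%s/status" % (HOST, PORT, VERSION)))
if body.get("status") == "ok":
    print("OK: WebHCat Server status [%s]" % body)
    sys.exit(0)
print("CRITICAL: Error accessing WebHCat Server, status [%s]" % body)
sys.exit(2)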

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh
new file mode 100644
index 0000000..b23045e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/check_webui.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+checkurl () {
+  url=$1
+  curl $url -o /dev/null
+  echo $?
+}
+
+service=$1
+host=$2
+port=$3
+
+if [[ -z "$service" || -z "$host" || -z "$port" ]]; then
+  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name port";
+  exit 3;
+fi
+
+case "$service" in
+
+jobtracker) 
+    jtweburl="http://$host:$port"
+    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
+      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
+      exit 1;
+    fi
+    ;;
+namenode)
+    nnweburl="http://$host:$port"
+    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
+      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
+      exit 1;
+    fi
+    ;;
+jobhistory)
+    jhweburl="http://$host:$port/jobhistoryhome.jsp"
+    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
+      exit 1;
+    fi
+    ;;
+hbase)
+    hbaseweburl="http://$host:$port/master-status"
+    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
+      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
+      exit 1;
+    fi
+    ;;
+resourcemanager)
+    rmweburl="http://$host:$port/cluster"
+    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
+      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
+      exit 1;
+    fi
+    ;;
+historyserver2)
+    hsweburl="http://$host:$port/jobhistory"
+    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
+      exit 1;
+    fi
+    ;;
+*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
+   exit 3
+   ;;
+esac
+
+echo "OK: Successfully accessed $service Web UI"
+exit 0;
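
The case statement maps each service name to the UI path to probe; in Python
that collapses to a dispatch table. A sketch under the same semantics, with
paths copied from the script above and argument validation kept minimal:

import sys
from urllib.request import urlopen

# service name -> UI path, mirroring the case statement above
PATHS = {
    "jobtracker":      "/",
    "namenode":        "/",
    "jobhistory":      "/jobhistoryhome.jsp",
    "hbase":           "/master-status",
    "resourcemanager": "/cluster",
    "historyserver2":  "/jobhistory",
}

def check_webui(service, host, port):
    if service not in PATHS:
        print("UNKNOWN: Invalid service name [%s]" % service)
        return 3
    url = "http://%s:%s%s" % (host, port, PATHS[service])
    try:
        urlopen(url, timeout=10)
    except OSError:
        print("WARNING: %s Web UI not accessible : %s" % (service, url))
        return 1
    print("OK: Successfully accessed %s Web UI" % service)
    return 0

if __name__ == "__main__":
    sys.exit(check_webui(*sys.argv[1:4]))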

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php
new file mode 100644
index 0000000..487eb43
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/files/hdp_nagios_init.php
@@ -0,0 +1,81 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Common functions called from other alerts
+ *
+ */
+ 
+ /*
+ * Function for kinit. If security is enabled and klist shows no ticket for
+ * this principal, makes the kinit call.
+ */
+  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
+    if($security_enabled === 'true') {
+    
+      $is_logined = is_logined($principal_name);
+      
+      if (!$is_logined)
+        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
+      else
+        $status = array(0, '');
+    } else {
+      $status = array(0, '');
+    }
+  
+    return $status;
+  }
+  
+  
+  /*
+  * Checks whether the user already holds a Kerberos ticket for the principal.
+  */
+  function is_logined($principal_name) {
+    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
+    $check_output =  shell_exec($check_cmd);
+    
+    if ($check_output)
+      return false;
+    else
+      return true;
+  }
+
+  /*
+  * Runs kinit command.
+  */
+  function kinit($kinit_path_local, $keytab_path, $principal_name) {
+    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
+    $kinit_output = shell_exec($init_cmd);
+    if ($kinit_output) 
+      $status = array(1, $kinit_output);
+    else
+      $status = array(0, '');
+      
+    return $status;
+  }
+
+  function logout() {
+    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
+      $status = true;
+    else
+      $status = false;
+      
+    return $status;
+  }
+ 
+ ?>
\ No newline at end of file
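
Given the direction of this change (puppet out, python in), the same helper
translates naturally. A sketch that keys success off the kinit exit code
rather than its output, which is slightly more robust than the PHP version's
output test; call names mirror the PHP functions, not an existing API:

import subprocess

def kinit_if_needed(security_enabled, kinit_path_local, keytab_path, principal_name):
    """Returns (0, '') on success or no-op, (1, output) on kinit failure."""
    if security_enabled != "true":
        return (0, "")
    # Mirror of is_logined(): klist output mentioning the principal
    # means a ticket is already cached.
    klist = subprocess.run(["klist"], stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT, universal_newlines=True)
    if klist.returncode == 0 and principal_name in klist.stdout:
        return (0, "")
    kinit = subprocess.run([kinit_path_local, "-kt", keytab_path, principal_name],
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                           universal_newlines=True)
    if kinit.returncode != 0:
        return (1, kinit.stdout.strip())
    return (0, "")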

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py
new file mode 100644
index 0000000..964225e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/functions.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.libraries.script.config_dictionary import UnknownConfiguration
+
+def get_port_from_url(address):
+  """Return the text after the last ':' in address (the port); pass UnknownConfiguration through unchanged."""
+  if not is_empty(address):
+    return address.split(':')[-1]
+  else:
+    return address
+
+def is_empty(var):
+  return isinstance(var, UnknownConfiguration)
\ No newline at end of file
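
Since get_port_from_url simply takes everything after the last colon, it handles both host:port strings and bare ports; for example:

    >>> get_port_from_url("c6401.ambari.apache.org:50070")
    '50070'
    >>> get_port_from_url("50070")   # no colon, so split(':')[-1] is the whole string
    '50070'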

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py
new file mode 100644
index 0000000..9150995
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from nagios_server_config import nagios_server_config
+
+def nagios():
+  import params
+
+  File( params.nagios_httpd_config_file,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    content = Template("nagios.conf.j2"),
+    mode   = 0644
+  )
+
+  # start snmpd now and enable it at boot
+  Execute( "service snmpd start; chkconfig snmpd on",
+    path = "/usr/local/bin/:/bin/:/sbin/"
+  )
+  
+  Directory( params.conf_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+
+  Directory( [params.plugins_dir, params.nagios_obj_dir])
+
+  Directory( params.nagios_pid_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755,
+    recursive = True
+  )
+
+  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    recursive = True
+  )
+  
+  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755
+  )
+
+  nagios_server_config()
+
+  set_web_permissions()
+
+  File( format("{conf_dir}/command.cfg"),
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+  
+  
+def set_web_permissions():
+  import params
+
+  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
+  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
+  Execute( cmd,
+    not_if = test
+  )
+
+  File( "/etc/nagios/htpasswd.users",
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode  = 0640
+  )
+
+  if System.get_instance().os_family == "suse":
+    command = format("usermod -G {nagios_group} wwwrun")
+  else:
+    command = format("usermod -a -G {nagios_group} apache")
+  
+  Execute( command)
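
The htpasswd call above leans on resource_management's not_if guard for idempotency: the command is skipped whenever the guard command exits 0. A minimal sketch of the same pattern (hypothetical resource, not from this patch):

    from resource_management import Execute

    # useradd runs only when `id nagios` fails, i.e. the user is missing
    Execute('useradd nagios',
            not_if='id nagios')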

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py
new file mode 100644
index 0000000..02685c7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from nagios import nagios
+from nagios_service import nagios_service
+
+         
+class NagiosServer(Script):
+  def install(self, env):
+    remove_conflicting_packages()
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    nagios()
+
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    self.configure(env) # re-run configure so configs pick up changes (e.g. after security is enabled)
+    nagios_service(action='start')
+
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    
+    nagios_service(action='stop')
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nagios_pid_file)
+    
+def remove_conflicting_packages():  
+  Package( 'hdp_mon_nagios_addons',
+    action = "remove"
+  )
+
+  Package( 'nagios-plugins',
+    action = "remove"
+  )
+
+  Execute( "rpm -e --allmatches --nopostun nagios",
+    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+    ignore_failures = True 
+  )
+
+# Local debugging harness: fakes the argv the agent would normally pass in.
+# Kept for development only; the __main__ block below bypasses it.
+def main():
+  command_type = sys.argv[1] if len(sys.argv) > 1 else "install"
+  print "Running " + command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
+  stroutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
+  
+  NagiosServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  NagiosServer().execute()
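
Script.execute() dispatches on the command name the agent passes in sys.argv, which is why main() above fakes an argv before calling it. Roughly (a simplified sketch, assuming the resource_management Script base class works this way):

    import sys, json

    class Script(object):
        def execute(self):
            command = sys.argv[1]                 # e.g. "install", "start", "status"
            with open(sys.argv[2]) as f:          # command JSON written by the agent
                config = json.load(f)
            method = getattr(self, command)       # NagiosServer.install, .start, ...
            method(env=None)                      # the real code passes an Environment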

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py
new file mode 100644
index 0000000..9f6c884
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_server_config.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def nagios_server_config():
+  import params
+  
+  nagios_server_configfile( 'nagios.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'resource.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'hadoop-hosts.cfg')
+  nagios_server_configfile( 'hadoop-hostgroups.cfg')
+  nagios_server_configfile( 'hadoop-servicegroups.cfg')
+  nagios_server_configfile( 'hadoop-services.cfg')
+  nagios_server_configfile( 'hadoop-commands.cfg')
+  nagios_server_configfile( 'contacts.cfg')
+  
+  if System.get_instance().os_family != "suse":
+    nagios_server_configfile( 'nagios',
+                              config_dir = '/etc/init.d/', 
+                              mode = 0755, 
+                              owner = 'root', 
+                              group = 'root'
+    )
+
+  nagios_server_check( 'check_cpu.pl')
+  nagios_server_check( 'check_datanode_storage.php')
+  nagios_server_check( 'check_aggregate.php')
+  nagios_server_check( 'check_hdfs_blocks.php')
+  nagios_server_check( 'check_hdfs_capacity.php')
+  nagios_server_check( 'check_rpcq_latency.php')
+  nagios_server_check( 'check_webui.sh')
+  nagios_server_check( 'check_name_dir_status.php')
+  nagios_server_check( 'check_oozie_status.sh')
+  nagios_server_check( 'check_templeton_status.sh')
+  nagios_server_check( 'check_hive_metastore_status.sh')
+  nagios_server_check( 'check_hue_status.sh')
+  nagios_server_check( 'check_mapred_local_dir_used.sh')
+  nagios_server_check( 'check_nodemanager_health.sh')
+  nagios_server_check( 'check_namenodes_ha.sh')
+  nagios_server_check( 'hdp_nagios_init.php')
+
+
+def nagios_server_configfile(
+  name,
+  owner = None,
+  group = None,
+  config_dir = None,
+  mode = None
+):
+  import params
+  owner = params.nagios_user if not owner else owner
+  group = params.user_group if not group else group
+  config_dir = params.nagios_obj_dir if not config_dir else config_dir
+  
+  TemplateConfig( format("{config_dir}/{name}"),
+    owner          = owner,
+    group          = group,
+    mode           = mode
+  )
+
+def nagios_server_check(name):
+  File( format("{plugins_dir}/{name}"),
+    content = StaticFile(name), 
+    mode = 0755
+  )
\ No newline at end of file
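
Both helpers rely on resource_management's format(), which fills {name} placeholders from the caller's scope rather than from explicit arguments; a small sketch (assuming the standard helper):

    from resource_management import format

    config_dir = "/etc/nagios/objects"
    name = "hadoop-hosts.cfg"
    # resolved against the caller's variables, much like "...".format(**locals())
    print format("{config_dir}/{name}")   # -> /etc/nagios/objects/hadoop-hosts.cfg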

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py
new file mode 100644
index 0000000..cc411b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/nagios_service.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def nagios_service(action='start'): # action is 'start' or 'stop'
+  import params
+
+  if action == 'start':
+    command = "service nagios start"
+  elif action == 'stop':
+    command = format("service nagios stop && rm -f {nagios_pid_file}")
+
+  Execute( command,
+    path = "/usr/local/bin/:/bin/:/sbin/"
+  )
+  MonitorWebserver("restart")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
new file mode 100644
index 0000000..4edae8c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from functions import get_port_from_url
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/nagios"
+nagios_var_dir = "/var/nagios"
+nagios_rw_dir = "/var/nagios/rw"
+plugins_dir = "/usr/lib64/nagios/plugins"
+nagios_obj_dir = "/etc/nagios/objects"
+check_result_path = "/var/nagios/spool/checkresults"
+nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
+nagios_log_dir = "/var/log/nagios"
+nagios_log_archives_dir = format("{nagios_log_dir}/archives")
+nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
+nagios_lookup_daemon_str = "/usr/sbin/nagios"
+nagios_pid_dir = status_params.nagios_pid_dir
+nagios_pid_file = status_params.nagios_pid_file
+nagios_resource_cfg = format("{conf_dir}/resource.cfg")
+nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
+nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
+nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
+nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
+eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
+nagios_principal_name = default("nagios_principal_name", "nagios")
+hadoop_ssl_enabled = False
+
+namenode_metadata_port = "8020"
+oozie_server_port = "11000"
+# differs from HDP1
+namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+# differs from HDP1
+snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.namenode.secondary.http-address"])
+
+hbase_master_rpc_port = "60000"
+rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
+nm_port = "8042"
+hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'])
+journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
+datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
+flume_port = "4159"
+hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
+templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
+hbase_rs_port = "60030"
+
+# this is different for HDP1
+nn_metrics_property = "FSNamesystem"
+clientPort = config['configurations']['global']['clientPort'] #ZK 
+
+
+java64_home = config['hostLevelParams']['java_home']
+security_enabled = config['configurations']['global']['security_enabled']
+
+nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+ganglia_port = "8651"
+ganglia_collector_slaves_port = "8660"
+ganglia_collector_namenode_port = "8661"
+ganglia_collector_jobtracker_port = "8662"
+ganglia_collector_hbase_port = "8663"
+ganglia_collector_rm_port = "8664"
+ganglia_collector_nm_port = "8660"
+ganglia_collector_hs_port = "8666"
+  
+all_ping_ports = config['clusterHostInfo']['all_ping_ports']
+
+if System.get_instance().os_family == "suse":
+  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
+  htpasswd_cmd = "htpasswd2"
+else:
+  nagios_p1_pl = "/usr/bin/p1.pl"
+  htpasswd_cmd = "htpasswd"
+  
+nagios_user = config['configurations']['global']['nagios_user']
+nagios_group = config['configurations']['global']['nagios_group']
+nagios_web_login = config['configurations']['global']['nagios_web_login']
+nagios_web_password = config['configurations']['global']['nagios_web_password']
+user_group = config['configurations']['global']['user_group']
+nagios_contact = config['configurations']['global']['nagios_contact']
+
+namenode_host = default("/clusterHostInfo/namenode_host", None)
+_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
+_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
+_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
+_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
+_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
+_rm_host = default("/clusterHostInfo/rm_host", None)
+_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
+_hs_host = default("/clusterHostInfo/hs_host", None)
+_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
+_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
+_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
+_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
+
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
+_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
+_oozie_server = default("/clusterHostInfo/oozie_server",None)
+_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
+# can differ on HDP1
+#_mapred_tt_hosts = _slave_hosts
+# if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
+_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
+_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
+all_hosts = config['clusterHostInfo']['all_hosts']
+
+
+hostgroup_defs = {
+    'namenode' : namenode_host,
+    'snamenode' : _snamenode_host,
+    'slaves' : _slave_hosts,
+    # HDP1
+    #'tasktracker-servers' : _mapred_tt_hosts,
+    'agent-servers' : all_hosts,
+    'nagios-server' : _nagios_server_host,
+    'jobtracker' : _jtnode_host,
+    'ganglia-server' : _ganglia_server_host,
+    'flume-servers' : _flume_hosts,
+    'zookeeper-servers' : _zookeeper_hosts,
+    'hbasemasters' : hbase_master_hosts,
+    'hiveserver' : _hive_server_host,
+    'region-servers' : _hbase_rs_hosts,
+    'oozie-server' : _oozie_server,
+    'webhcat-server' : _webhcat_server_host,
+    'hue-server' : _hue_server_host,
+    'resourcemanager' : _rm_host,
+    'nodemanagers' : _nm_hosts,
+    'historyserver2' : _hs_host,
+    'journalnodes' : _journalnode_hosts
+}
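
Note the two access styles above: direct dictionary lookups hard-fail with a KeyError when a key is absent, while default() falls back quietly for components that may not be installed. A rough re-implementation of default() for illustration only (the real helper lives in resource_management and reads the command JSON itself):

    def default(path, fallback, config):
        # walk a '/'-separated path through nested dicts,
        # returning the fallback as soon as a key is missing
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cfg = {'clusterHostInfo': {'namenode_host': ['c6401.ambari.apache.org']}}
    print default('/clusterHostInfo/namenode_host', None, cfg)    # ['c6401.ambari.apache.org']
    print default('/clusterHostInfo/hue_server_host', None, cfg)  # None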

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py
new file mode 100644
index 0000000..33b35fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+nagios_pid_dir = "/var/run/nagios"
+nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2
new file mode 100644
index 0000000..9dada51
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/contacts.cfg.j2
@@ -0,0 +1,91 @@
+###############################################################################
+# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
+#
+# Last Modified: 05-31-2007
+#
+# NOTES: This config file provides you with some example contact and contact
+#        group definitions that you can reference in host and service
+#        definitions.
+#       
+#        You don't need to keep these definitions in a separate file from your
+#        other object definitions.  This has been done just to make things
+#        easier to understand.
+#
+###############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+
+###############################################################################
+###############################################################################
+#
+# CONTACTS
+#
+###############################################################################
+###############################################################################
+
+# Just one contact defined by default - the Nagios admin (that's you)
+# This contact definition inherits a lot of default values from the 'generic-contact' 
+# template which is defined elsewhere.
+
+define contact{
+        contact_name    {{nagios_web_login}}                                        ; Short name of user
+        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
+        alias           Nagios Admin                                                ; Full name of user
+
+        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
+        }
+
+# Contact which writes all Nagios alerts to the system logger.
+define contact{
+        contact_name                    sys_logger         ; Short name of user
+        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
+        alias                           System Logger      ; Full name of user
+        host_notifications_enabled      1
+        service_notifications_enabled   1
+        service_notification_period     24x7
+        host_notification_period        24x7
+        service_notification_options    w,u,c,r,s
+        host_notification_options       d,u,r,s
+        can_submit_commands             1
+        retain_status_information       1
+        service_notification_commands   service_sys_logger
+        host_notification_commands      host_sys_logger
+        }
+
+###############################################################################
+###############################################################################
+#
+# CONTACT GROUPS
+#
+###############################################################################
+###############################################################################
+
+# We only have one contact in this simple configuration file, so there is
+# no need to create more than one contact group.
+
+define contactgroup {
+        contactgroup_name       admins
+        alias                   Nagios Administrators
+        members                 {{nagios_web_login}},sys_logger
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
new file mode 100644
index 0000000..99870d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
@@ -0,0 +1,114 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% if env.system.os_family != "suse" %}
+# 'check_cpu' checks remote CPU load
+define command {
+        command_name    check_cpu
+        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
+       }
+{% endif %}
+
+# Check whether DataNode storage is full
+define command {
+        command_name    check_datanode_storage
+        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
+       }
+
+define command{
+        command_name    check_hdfs_blocks
+        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
+       }
+
+define command{
+        command_name    check_hdfs_capacity
+        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_aggregate
+        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_rpcq_latency
+        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_nagios
+        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
+       }
+
+define command{
+        command_name    check_webui
+        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
+       }
+
+define command{
+        command_name    check_name_dir_status
+        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
+       }
+
+define command{
+        command_name    check_oozie_status
+        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_templeton_status
+        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_hive_metastore_status
+        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+define command{
+        command_name    check_hue_status
+        command_line    $USER1$/check_hue_status.sh
+       }
+
+define command{
+       command_name    check_mapred_local_dir_used_space
+       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
+       }
+
+define command{
+       command_name    check_namenodes_ha
+       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
+       }
+
+define command{
+        command_name    check_nodemanager_health
+        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
+       }
+
+define command{
+        command_name    host_sys_logger
+        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
+       }
+
+define command{
+        command_name    service_sys_logger
+        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
+       }
\ No newline at end of file
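
Nagios wires these commands to services through check_command strings of the form name!arg1!arg2, where the !-separated fields populate $ARG1$, $ARG2$, and so on; in Python terms, roughly (hypothetical values for illustration):

    # sketch of Nagios macro substitution for a check_command string
    check_command = "check_webui!jobhistory!19888"
    parts = check_command.split('!')
    command_name, args = parts[0], parts[1:]   # -> $ARG1$="jobhistory", $ARG2$="19888"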

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
new file mode 100644
index 0000000..2bcbf7c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
@@ -0,0 +1,15 @@
+{% for name, hosts in hostgroup_defs.iteritems() %}
+{% if hosts %}
+define hostgroup {
+        hostgroup_name  {{name}}
+        alias           {{name}}
+        members         {{','.join(hosts)}}
+}
+{% endif %}
+{% endfor %}
+
+define hostgroup {
+        hostgroup_name  all-servers
+        alias           All Servers
+        members         {{','.join(all_hosts)}}
+}
\ No newline at end of file
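
To preview how hostgroup_defs from params.py renders through this template, something like the following works locally (assumes the jinja2 package under Python 2, since the template uses iteritems(); host names are made up):

    from jinja2 import Template

    tmpl = Template(open('hadoop-hostgroups.cfg.j2').read())
    print tmpl.render(
        hostgroup_defs={'namenode': ['c6401.ambari.apache.org']},
        all_hosts=['c6401.ambari.apache.org', 'c6402.ambari.apache.org'])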

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
new file mode 100644
index 0000000..62555d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
@@ -0,0 +1,16 @@
+{% for host in all_hosts %}
+define host {
+        alias        {{host}}
+        host_name    {{host}}
+        use          linux-server
+        address      {{host}}
+        check_interval         0.25
+        retry_interval         0.25
+        max_check_attempts     4
+        notifications_enabled     1
+        first_notification_delay  0     # Send the notification immediately after the hard state change
+        notification_interval     0     # Send the notification only once
+        notification_options      d,u,r
+}
+
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
new file mode 100644
index 0000000..0101ce6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
@@ -0,0 +1,80 @@
+{% if hostgroup_defs['namenode'] or
+  hostgroup_defs['snamenode']  or
+  hostgroup_defs['slaves'] %}
+define servicegroup {
+  servicegroup_name  HDFS
+  alias  HDFS Checks
+}
+{% endif %}
+{% if hostgroup_defs['jobtracker'] or
+  hostgroup_defs['historyserver2'] %}
+define servicegroup {
+  servicegroup_name  MAPREDUCE
+  alias  MAPREDUCE Checks
+}
+{% endif %}
+{% if hostgroup_defs['resourcemanager'] or
+  hostgroup_defs['nodemanagers'] %}
+define servicegroup {
+  servicegroup_name  YARN
+  alias  YARN Checks
+}
+{% endif %}
+{% if hostgroup_defs['flume-servers'] %}
+define servicegroup {
+  servicegroup_name  FLUME
+  alias  FLUME Checks
+}
+{% endif %}
+{% if hostgroup_defs['hbasemasters'] %}
+define servicegroup {
+  servicegroup_name  HBASE
+  alias  HBASE Checks
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+define servicegroup {
+  servicegroup_name  OOZIE
+  alias  OOZIE Checks
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+define servicegroup {
+  servicegroup_name  WEBHCAT
+  alias  WEBHCAT Checks
+}
+{% endif %}
+{% if hostgroup_defs['nagios-server'] %}
+define servicegroup {
+  servicegroup_name  NAGIOS
+  alias  NAGIOS Checks
+}
+{% endif %}
+{% if hostgroup_defs['ganglia-server'] %}
+define servicegroup {
+  servicegroup_name  GANGLIA
+  alias  GANGLIA Checks
+}
+{% endif %}
+{% if hostgroup_defs['hiveserver'] %}
+define servicegroup {
+  servicegroup_name  HIVE-METASTORE
+  alias  HIVE-METASTORE Checks
+}
+{% endif %}
+{% if hostgroup_defs['zookeeper-servers'] %}
+define servicegroup {
+  servicegroup_name  ZOOKEEPER
+  alias  ZOOKEEPER Checks
+}
+{% endif %}
+define servicegroup {
+  servicegroup_name  AMBARI
+  alias  AMBARI Checks
+}
+{% if hostgroup_defs['hue-server'] %}
+define servicegroup {
+  servicegroup_name  HUE
+  alias  HUE Checks
+}
+{% endif %}
\ No newline at end of file