Posted to commits@ambari.apache.org by jl...@apache.org on 2015/08/17 07:13:26 UTC

[01/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Repository: ambari
Updated Branches:
  refs/heads/trunk a236102cf -> 930d44999


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py
deleted file mode 100644
index aaf60b4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py
+++ /dev/null
@@ -1,86 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  zk_home = '/usr/phd/current/zookeeper-client'
-  zk_bin = '/usr/phd/current/zookeeper-client/bin'
-  smoke_script = '/usr/phd/current/zookeeper-client/bin/zkCli.sh'
-else:
-  zk_home = '/usr'
-  zk_bin = '/usr/lib/zookeeper/bin'
-  smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-
-config_dir = "/etc/zookeeper/conf"
-zk_user = config['configurations']['zookeeper-env']['zk_user']
-hostname = config['hostname']
-user_group = config['configurations']['cluster-env']['user_group']
-zk_env_sh_template = config['configurations']['zookeeper-env']['content']
-
-zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
-zk_data_dir = config['configurations']['zookeeper-env']['zk_data_dir']
-zk_pid_dir = status_params.zk_pid_dir
-zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize = "-Xmx1024m"
-
-tickTime = config['configurations']['zookeeper-env']['tickTime']
-initLimit = config['configurations']['zookeeper-env']['initLimit']
-syncLimit = config['configurations']['zookeeper-env']['syncLimit']
-clientPort = config['configurations']['zookeeper-env']['clientPort']
-
-if 'zoo.cfg' in config['configurations']:
-  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
-else:
-  zoo_cfg_properties_map = {}
-zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
-
-zk_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper@EXAMPLE.COM")
-zk_principal = zk_principal_name.replace('_HOST',hostname.lower())
-
-java64_home = config['hostLevelParams']['java_home']
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-
-zk_keytab_path = config['configurations']['zookeeper-env']['zookeeper_keytab_path']
-zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
-zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-
-#log4j.properties
-if (('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j'])):
-  log4j_props = config['configurations']['zookeeper-log4j']['content']
-else:
-  log4j_props = None
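
The deleted params.py above resolves everything from the dictionary returned by Script.get_config(), falling back through default() when a key such as rpm_version is absent. A minimal sketch of that lookup pattern, using a plain nested dict in place of Ambari's resource_management helpers (the default() shown here is an assumption modeled on its use above, not the library's actual implementation):

    config = {
        "configurations": {
            "cluster-env": {},                        # no rpm_version -> /usr/lib paths
            "zookeeper-env": {"zk_user": "zookeeper"},
        }
    }

    def default(path, fallback):
        # Walk a /-separated path through the nested dict, returning
        # fallback as soon as any key is missing.
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    rpm_version = default("/configurations/cluster-env/rpm_version", None)
    zk_bin = "/usr/phd/current/zookeeper-client/bin" if rpm_version else "/usr/lib/zookeeper/bin"
    print(zk_bin)  # -> /usr/lib/zookeeper/bin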

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py
deleted file mode 100644
index 87c13db..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ZookeeperServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File(format("{tmp_dir}/zkSmoke.sh"),
-         mode=0755,
-         content=StaticFile('zkSmoke.sh')
-    )
-
-    cmd_quorum = format("{tmp_dir}/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
-                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
-                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd_quorum,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-if __name__ == "__main__":
-  ZookeeperServiceCheck().execute()
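
The service check above writes zkSmoke.sh to the agent's temp dir and runs it with three attempts, five seconds apart. A rough stand-in for that retry behavior using only the standard library (the tries/try_sleep semantics are inferred from the Execute arguments above, not taken from resource_management's implementation):

    import subprocess
    import time

    def run_with_retries(cmd, tries=3, try_sleep=5):
        # Re-run the command until it exits 0, sleeping between attempts.
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))

    run_with_retries("true", tries=3, try_sleep=5)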

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/status_params.py
deleted file mode 100644
index 36c5c30..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-zk_pid_dir = config['configurations']['zookeeper-env']['zk_pid_dir']
-zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper.py
deleted file mode 100644
index 4f2bb1f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper.py
+++ /dev/null
@@ -1,110 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-
-def zookeeper(type=None):
-  import params
-
-  Directory(params.config_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  File(format("{config_dir}/zookeeper-env.sh"),
-       content=InlineTemplate(params.zk_env_sh_template),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-  
-
-  configFile("zoo.cfg", template_name="zoo.cfg.j2")
-  configFile("configuration.xsl", template_name="configuration.xsl.j2")
-
-  Directory(params.zk_pid_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_log_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_data_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  if type == 'server':
-    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
-
-    File(format("{zk_data_dir}/myid"),
-         mode = 0644,
-         content = myid
-    )
-
-  if (params.log4j_props != None):
-    File(format("{params.config_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.zk_user,
-         content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.config_dir}/log4j.properties"))):
-    File(format("{params.config_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.zk_user
-    )
-
-  if params.security_enabled:
-    if type == "server":
-      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-    else:
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-
-  File(format("{config_dir}/zoo_sample.cfg"),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-def configFile(name, template_name=None):
-  import params
-
-  File(format("{config_dir}/{name}"),
-       content=Template(template_name),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-
-
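
One detail worth calling out from the hunk above: for a server, myid is derived as the host's 1-based position in the sorted quorum list, so every node computes the same numbering independently, with no coordination. The same logic in isolation (hostnames illustrative):

    zookeeper_hosts = ["zk3.example.com", "zk1.example.com", "zk2.example.com"]
    hostname = "zk2.example.com"
    # 1-based index into the sorted host list; identical on every node.
    myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
    print(myid)  # -> "2"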

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py
deleted file mode 100644
index 4bffac3..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-
-class ZookeeperClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    zookeeper(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  ZookeeperClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
deleted file mode 100644
index 9b9112c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-from zookeeper_service import zookeeper_service
-
-class ZookeeperServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    zookeeper(type='server')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    zookeeper_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    zookeeper_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.zk_pid_file)
-
-if __name__ == "__main__":
-  ZookeeperServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py
deleted file mode 100644
index d642bc6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def zookeeper_service(action='start'):
-  import params
-
-  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
-
-  if action == 'start':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
-    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.zk_user
-    )
-  elif action == 'stop':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
-    rm_pid = format("rm -f {zk_pid_file}")
-    Execute(daemon_cmd,
-            user=params.zk_user
-    )
-    Execute(rm_pid)
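
The start branch above is guarded by no_op_test, which treats the server as already running when the pid file names a live process. The same check expressed in plain Python (the shell one-liner above is what actually runs; this path and helper are illustrative only):

    import os

    def zookeeper_is_running(zk_pid_file="/var/run/zookeeper/zookeeper_server.pid"):
        # Signal 0 probes a pid without delivering anything; OSError means
        # the process is gone (or not ours to signal).
        try:
            with open(zk_pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)
            return True
        except (IOError, ValueError, OSError):
            return False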

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2
deleted file mode 100644
index 8830c45..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2
+++ /dev/null
@@ -1,42 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2
deleted file mode 100644
index beb4730..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2
+++ /dev/null
@@ -1,69 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The number of milliseconds of each tick
-tickTime={{tickTime}}
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit={{initLimit}}
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit={{syncLimit}}
-# the directory where the snapshot is stored.
-dataDir={{zk_data_dir}}
-# the port at which the clients will connect
-clientPort={{clientPort}}
-{% for host in zookeeper_hosts %}
-server.{{loop.index}}={{host}}:2888:3888
-{% endfor %}
-
-{% if security_enabled %}
-authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-jaasLoginRenew=3600000
-kerberos.removeHostFromPrincipal=true
-kerberos.removeRealmFromPrincipal=true
-{% endif %}
-
-{% if zoo_cfg_properties_map_length > 0 %}
-# Custom properties
-{% endif %}
-{% for key, value in zoo_cfg_properties_map.iteritems() %}
-{{key}}={{value}}
-{% endfor %}
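
In the template above, loop.index is Jinja's 1-based loop counter, so the rendered zoo.cfg gets server.1, server.2, ... in sorted-host order (params.py sorts zookeeper_hosts before it reaches the template). Rendering just that portion with stock Jinja2, outside Ambari's Template wrapper (hostnames illustrative):

    from jinja2 import Template

    snippet = Template(
        "{% for host in zookeeper_hosts %}"
        "server.{{ loop.index }}={{ host }}:2888:3888\n"
        "{% endfor %}"
    )
    print(snippet.render(zookeeper_hosts=["zk1", "zk2", "zk3"]))
    # server.1=zk1:2888:3888
    # server.2=zk2:2888:3888
    # server.3=zk3:2888:3888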

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
deleted file mode 100644
index 38f9721..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
deleted file mode 100644
index c3e9505..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{zk_keytab_path}}"
-principal="{{zk_principal}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/stack_advisor.py
deleted file mode 100644
index 3a2347a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/stack_advisor.py
+++ /dev/null
@@ -1,443 +0,0 @@
-#!/usr/bin/env ambari-python-wrap
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import re
-import sys
-from math import ceil
-
-from stack_advisor import DefaultStackAdvisor
-
-class BasePHD3000StackAdvisor(DefaultStackAdvisor):
-
-  def getComponentLayoutValidations(self, services, hosts):
-    """Returns array of Validation objects about issues with hostnames components assigned to"""
-    items = []
-
-    # Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
-    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
-    hostsCount = len(hostsList)
-
-    componentsListList = [service["components"] for service in services["services"]]
-    componentsList = [item for sublist in componentsListList for item in sublist]
-    nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
-    secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
-
-    # Validating cardinality
-    for component in componentsList:
-      if component["StackServiceComponents"]["cardinality"] is not None:
-         componentName = component["StackServiceComponents"]["component_name"]
-         componentDisplayName = component["StackServiceComponents"]["display_name"]
-         componentHostsCount = 0
-         if component["StackServiceComponents"]["hostnames"] is not None:
-           componentHostsCount = len(component["StackServiceComponents"]["hostnames"])
-         cardinality = str(component["StackServiceComponents"]["cardinality"])
-         # cardinality types: null, 1+, 1-2, 1, ALL
-         message = None
-         if "+" in cardinality:
-           hostsMin = int(cardinality[:-1])
-           if componentHostsCount < hostsMin:
-             message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
-         elif "-" in cardinality:
-           nums = cardinality.split("-")
-           hostsMin = int(nums[0])
-           hostsMax = int(nums[1])
-           if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
-             message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
-         elif "ALL" == cardinality:
-           if componentHostsCount != hostsCount:
-             message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
-         else:
-           if componentHostsCount != int(cardinality):
-             message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
-
-         if message is not None:
-           items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
-
-    # Validating host-usage
-    usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
-    usedHostsList = [item for sublist in usedHostsListList for item in sublist]
-    nonUsedHostsList = [item for item in hostsList if item not in usedHostsList]
-    for host in nonUsedHostsList:
-      items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
-
-    return items
-
-  def getServiceConfigurationRecommenderDict(self):
-    return {
-      "YARN": self.recommendYARNConfigurations,
-      "MAPREDUCE2": self.recommendMapReduce2Configurations
-    }
-
-  def putProperty(self, config, configType):
-    config[configType] = {"properties": {}}
-    def appendProperty(key, value):
-      config[configType]["properties"][key] = str(value)
-    return appendProperty
-
-  def recommendYARNConfigurations(self, configurations, clusterData):
-    putYarnProperty = self.putProperty(configurations, "yarn-site")
-    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
-    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
-    putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
-
-  def recommendMapReduce2Configurations(self, configurations, clusterData):
-    putMapredProperty = self.putProperty(configurations, "mapred-site")
-    putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
-    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
-    putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
-    putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
-    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
-    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
-    putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
-
-  def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
-
-    hBaseInstalled = False
-    if 'HBASE' in servicesList:
-      hBaseInstalled = True
-
-    cluster = {
-      "cpu": 0,
-      "disk": 0,
-      "ram": 0,
-      "hBaseInstalled": hBaseInstalled,
-      "components": components
-    }
-
-    if len(hosts["items"]) > 0:
-      host = hosts["items"][0]["Hosts"]
-      cluster["cpu"] = host["cpu_count"]
-      cluster["disk"] = len(host["disk_info"])
-      cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
-
-    ramRecommendations = [
-      {"os":1, "hbase":1},
-      {"os":2, "hbase":1},
-      {"os":2, "hbase":2},
-      {"os":4, "hbase":4},
-      {"os":6, "hbase":8},
-      {"os":8, "hbase":8},
-      {"os":8, "hbase":8},
-      {"os":12, "hbase":16},
-      {"os":24, "hbase":24},
-      {"os":32, "hbase":32},
-      {"os":64, "hbase":64}
-    ]
-    index = {
-      cluster["ram"] <= 4: 0,
-      4 < cluster["ram"] <= 8: 1,
-      8 < cluster["ram"] <= 16: 2,
-      16 < cluster["ram"] <= 24: 3,
-      24 < cluster["ram"] <= 48: 4,
-      48 < cluster["ram"] <= 64: 5,
-      64 < cluster["ram"] <= 72: 6,
-      72 < cluster["ram"] <= 96: 7,
-      96 < cluster["ram"] <= 128: 8,
-      128 < cluster["ram"] <= 256: 9,
-      256 < cluster["ram"]: 10
-    }[1]
-    cluster["reservedRam"] = ramRecommendations[index]["os"]
-    cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
-
-    cluster["minContainerSize"] = {
-      cluster["ram"] <= 4: 256,
-      4 < cluster["ram"] <= 8: 512,
-      8 < cluster["ram"] <= 24: 1024,
-      24 < cluster["ram"]: 2048
-    }[1]
-
-    totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
-    if cluster["hBaseInstalled"]:
-      totalAvailableRam -= cluster["hbaseRam"]
-    cluster["totalAvailableRam"] = max(2048, totalAvailableRam * 1024)
-    '''containers = max(3, min(2*cores, min(1.8*DISKS, (Total available RAM) / MIN_CONTAINER_SIZE)))'''
-    cluster["containers"] = round(max(3,
-                                min(2 * cluster["cpu"],
-                                    min(ceil(1.8 * cluster["disk"]),
-                                            cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
-
-    '''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
-    cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
-    '''If greater than 1GB, value will be in multiples of 512.'''
-    if cluster["ramPerContainer"] > 1024:
-      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
-
-    cluster["mapMemory"] = int(cluster["ramPerContainer"])
-    cluster["reduceMemory"] = cluster["ramPerContainer"]
-    cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
-
-    return cluster
-
-  def getConfigurationsValidationItems(self, services, hosts):
-    """Returns array of Validation objects about issues with configuration values provided in services"""
-    items = []
-
-    recommendations = self.recommendConfigurations(services, hosts)
-    recommendedDefaults = recommendations["recommendations"]["blueprint"]["configurations"]
-
-    configurations = services["configurations"]
-    for service in services["services"]:
-      serviceName = service["StackServices"]["service_name"]
-      validator = self.validateServiceConfigurations(serviceName)
-      if validator is not None:
-        siteName = validator[0]
-        method = validator[1]
-        if siteName in recommendedDefaults:
-          siteProperties = getSiteProperties(configurations, siteName)
-          if siteProperties is not None:
-            resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"], configurations)
-            items.extend(resultItems)
-    return items
-
-  def getServiceConfigurationValidators(self):
-    return {
-      "MAPREDUCE2": ["mapred-site", self.validateMapReduce2Configurations],
-      "YARN": ["yarn-site", self.validateYARNConfigurations]
-    }
-
-  def validateServiceConfigurations(self, serviceName):
-    return self.getServiceConfigurationValidators().get(serviceName, None)
-
-  def toConfigurationValidationProblems(self, validationProblems, siteName):
-    result = []
-    for validationProblem in validationProblems:
-      validationItem = validationProblem.get("item", None)
-      if validationItem is not None:
-        problem = {"type": 'configuration', "level": validationItem["level"], "message": validationItem["message"],
-                   "config-type": siteName, "config-name": validationProblem["config-name"] }
-        result.append(problem)
-    return result
-
-  def getWarnItem(self, message):
-    return {"level": "WARN", "message": message}
-
-  def getErrorItem(self, message):
-    return {"level": "ERROR", "message": message}
-
-  def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    value = to_number(properties[propertyName])
-    if value is None:
-      return self.getErrorItem("Value should be integer")
-    defaultValue = to_number(recommendedDefaults[propertyName])
-    if defaultValue is None:
-      return None
-    if value < defaultValue:
-      return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
-    return None
-
-  def validateXmxValue(self, properties, recommendedDefaults, propertyName):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    value = properties[propertyName]
-    defaultValue = recommendedDefaults[propertyName]
-    if defaultValue is None:
-      return self.getErrorItem("Config's default value can't be null or undefined")
-    if not checkXmxValueFormat(value):
-      return self.getErrorItem('Invalid value format')
-    valueInt = formatXmxSizeToBytes(getXmxSize(value))
-    defaultValueXmx = getXmxSize(defaultValue)
-    defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
-    if valueInt < defaultValueInt:
-      return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
-    return None
-
-  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations):
-    validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
-                        {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
-                        {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
-                        {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
-                        {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
-                        {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
-                        {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')} ]
-    return self.toConfigurationValidationProblems(validationItems, "mapred-site")
-
-  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations):
-    validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
-                        {"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
-                        {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
-    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
-
-  def getMastersWithMultipleInstances(self):
-    return ['ZOOKEEPER_SERVER', 'HBASE_MASTER']
-
-  def getNotValuableComponents(self):
-    return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
-
-  def getNotPreferableOnServerComponents(self):
-    return ['GANGLIA_SERVER', 'NAGIOS_SERVER']
-
-  def getCardinalitiesDict(self):
-    return {
-      'ZOOKEEPER_SERVER': {"min": 3},
-      'HBASE_MASTER': {"min": 1},
-      }
-
-  def getComponentLayoutSchemes(self):
-    return {
-      'NAMENODE': {"else": 0},
-      'SECONDARY_NAMENODE': {"else": 1},
-      'HBASE_MASTER': {6: 0, 31: 2, "else": 3},
-
-      'HISTORYSERVER': {31: 1, "else": 2},
-      'RESOURCEMANAGER': {31: 1, "else": 2},
-
-      'OOZIE_SERVER': {6: 1, 31: 2, "else": 3},
-
-      'HIVE_SERVER': {6: 1, 31: 2, "else": 4},
-      'HIVE_METASTORE': {6: 1, 31: 2, "else": 4},
-      'WEBHCAT_SERVER': {6: 1, 31: 2, "else": 4},
-      }
-
-class PHD3000StackAdvisor(BasePHD3000StackAdvisor):
-
-  def getServiceConfigurationRecommenderDict(self):
-    parentRecommendConfDict = super(PHD3000StackAdvisor, self).getServiceConfigurationRecommenderDict()
-    childRecommendConfDict = {
-      "OOZIE": self.recommendOozieConfigurations,
-      "HIVE": self.recommendHiveConfigurations,
-      "TEZ": self.recommendTezConfigurations
-    }
-    parentRecommendConfDict.update(childRecommendConfDict)
-    return parentRecommendConfDict
-
-  def recommendOozieConfigurations(self, configurations, clusterData):
-    if "FALCON_SERVER" in clusterData["components"]:
-      putMapredProperty = self.putProperty(configurations, "oozie-site")
-      putMapredProperty("oozie.services.ext",
-                        "org.apache.oozie.service.JMSAccessorService," +
-                        "org.apache.oozie.service.PartitionDependencyManagerService," +
-                        "org.apache.oozie.service.HCatAccessorService")
-
-  def recommendHiveConfigurations(self, configurations, clusterData):
-    containerSize = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
-    containerSize = min(clusterData['containers'] * clusterData['ramPerContainer'], containerSize)
-    putHiveProperty = self.putProperty(configurations, "hive-site")
-    putHiveProperty('hive.auto.convert.join.noconditionaltask.size', int(round(containerSize / 3)) * 1048576)
-    putHiveProperty('hive.tez.java.opts', "-server -Xmx" + str(int(round(0.8 * containerSize)))
-                    + "m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC")
-    putHiveProperty('hive.tez.container.size', containerSize)
-
-  def recommendTezConfigurations(self, configurations, clusterData):
-    putTezProperty = self.putProperty(configurations, "tez-site")
-    putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']))
-    putTezProperty("tez.am.java.opts",
-                   "-server -Xmx" + str(int(0.8 * clusterData["amMemory"]))
-                   + "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC")
-
-  def getNotPreferableOnServerComponents(self):
-    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER', 'NAGIOS_SERVER']
-
-  def getNotValuableComponents(self):
-    return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR', 'APP_TIMELINE_SERVER']
-
-  def getComponentLayoutSchemes(self):
-    parentSchemes = super(PHD3000StackAdvisor, self).getComponentLayoutSchemes()
-    childSchemes = {
-        'APP_TIMELINE_SERVER': {31: 1, "else": 2},
-        'FALCON_SERVER': {6: 1, 31: 2, "else": 3}
-    }
-    parentSchemes.update(childSchemes)
-    return parentSchemes
-
-  def getServiceConfigurationValidators(self):
-    parentValidators = super(PHD3000StackAdvisor, self).getServiceConfigurationValidators()
-    childValidators = {
-      "HIVE": ["hive-site", self.validateHiveConfigurations],
-      "TEZ": ["tez-site", self.validateTezConfigurations]
-    }
-    parentValidators.update(childValidators)
-    return parentValidators
-
-  def validateHiveConfigurations(self, properties, recommendedDefaults, configurations):
-    validationItems = [ {"config-name": 'hive.tez.container.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.tez.container.size')},
-                        {"config-name": 'hive.tez.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'hive.tez.java.opts')},
-                        {"config-name": 'hive.auto.convert.join.noconditionaltask.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.auto.convert.join.noconditionaltask.size')} ]
-    return self.toConfigurationValidationProblems(validationItems, "hive-site")
-
-  def validateTezConfigurations(self, properties, recommendedDefaults, configurations):
-    validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
-                        {"config-name": 'tez.am.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'tez.am.java.opts')} ]
-    return self.toConfigurationValidationProblems(validationItems, "tez-site")
-
-# Validation helper methods
-def getSiteProperties(configurations, siteName):
-  siteConfig = configurations.get(siteName)
-  if siteConfig is None:
-    return None
-  return siteConfig.get("properties")
-
-def to_number(s):
-  try:
-    return int(re.sub("\D", "", s))
-  except ValueError:
-    return None
-
-def checkXmxValueFormat(value):
-  p = re.compile('-Xmx(\d+)(b|k|m|g|p|t|B|K|M|G|P|T)?')
-  matches = p.findall(value)
-  return len(matches) == 1
-
-def getXmxSize(value):
-  p = re.compile("-Xmx(\d+)(.?)")
-  result = p.findall(value)[0]
-  if len(result) > 1:
-    # result[1] is either a space or a size suffix (b|k|m|g, etc.)
-    return result[0] + result[1].lower()
-  return result[0]
-
-def formatXmxSizeToBytes(value):
-  value = value.lower()
-  if len(value) == 0:
-    return 0
-  modifier = value[-1]
-
-  if modifier == ' ' or modifier in "0123456789":
-    modifier = 'b'
-  m = {
-    modifier == 'b': 1,
-    modifier == 'k': 1024,
-    modifier == 'm': 1024 * 1024,
-    modifier == 'g': 1024 * 1024 * 1024,
-    modifier == 't': 1024 * 1024 * 1024 * 1024,
-    modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024
-    }[1]
-  return to_number(value) * m
-
-def getPort(address):
-  """
-  Extracts the port from an address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def isSecurePort(port):
-  """
-  Returns True if the port is root-owned (below 1024) on *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
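
The sizing arithmetic in getConfigurationClusterSummary above is easiest to follow with concrete numbers. Replaying it for one hypothetical host with 64 GB RAM, 16 cores, 8 disks, and no HBase (values illustrative; the two table lookups are transcribed from the code above):

    from math import ceil

    ram_gb, cores, disks = 64, 16, 8
    reserved_ram_gb = 8          # ramRecommendations row for 48 < ram <= 64
    min_container_mb = 2048      # minContainerSize row for ram > 24

    # max(2 GB, (RAM - reservedRam) in MB)
    total_available_mb = max(2048, (ram_gb - reserved_ram_gb) * 1024)        # 57344
    containers = round(max(3, min(2 * cores,
                                  min(ceil(1.8 * disks),
                                      total_available_mb / min_container_mb))))  # 15
    ram_per_container = total_available_mb / containers                      # ~3822.9
    if ram_per_container > 1024:
        # above 1 GB, round down to a multiple of 512 MB
        ram_per_container = int(ram_per_container / 512) * 512               # 3584
    print(containers, ram_per_container)  # 15 3584

These two numbers then feed directly into the yarn.nodemanager.resource.memory-mb and yarn.scheduler.*-allocation-mb recommendations shown earlier.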


[09/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index a41e261..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,163 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <displayName>Nagios</displayName>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-           <name>NAGIOS_SERVER</name>
-          <displayName>Nagios Server</displayName>
-           <category>MASTER</category>
-           <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>HIVE/HCAT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-           <commandScript>
-             <script>scripts/nagios_server.py</script>
-             <scriptType>PYTHON</scriptType>
-             <timeout>600</timeout>
-           </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>perl</name>
-            </package>
-            <package>
-              <name>fping</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>nagios3</name>
-            </package>
-            <package>
-              <name>nagios3-common</name>
-            </package>
-            <package>
-              <name>nagios3-dbg</name>
-            </package>
-            <package>
-              <name>nagios3-doc</name>
-            </package>
-            <package>
-              <name>nagios-plugins-extra</name>
-            </package>
-            <package>
-              <name>php5-curl</name>
-            </package>
-            <package>
-              <name>libapache2-mod-php5</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>php</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>php5*-json</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-            <package>
-              <name>php-curl</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5</osFamily>
-          <packages>
-            <package>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>nagios-env</config-type>
-      </configuration-dependencies>
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index 792b25b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,248 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-define("PASSIVE_MODE_STR", "AMBARIPASSIVE=");
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts = array();
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      $long_out = getParameter($object, "long_plugin_output");
-      $skip_if_match=!strncmp($long_out, PASSIVE_MODE_STR, strlen(PASSIVE_MODE_STR));
-
-      if (getParameter($object, "service_description") == $service_name && !$skip_if_match) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "STORM":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>
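
For reference, the aggregation above reduces to scanning Nagios's status.dat for hoststatus/servicestatus blocks and computing the percentage of affected entries. A minimal Python sketch of the host-count path, in the same Python 2 style as the other plugins in this patch (the status-file path and status code are hypothetical):

import re

def query_host_count(status_file_content, status_code):
    # Each host appears in status.dat as a "hoststatus { ... }" block.
    blocks = re.findall(r"hoststatus \{([\S\s]*?)\}", status_file_content)
    affected = 0
    for block in blocks:
        # Mirrors getParameter(): pull the value following "current_state".
        m = re.search(r"\scurrent_state[\s=]*([\S]*)", block)
        if m and m.group(1) == status_code:
            affected += 1
    return {'total': len(blocks), 'actual': affected}

content = open("/var/nagios/status.dat").read()   # hypothetical path
counts = query_host_count(content, "1")           # "1" = host DOWN
percent = 0 if counts['total'] == 0 else counts['actual'] * 100.0 / counts['total']
print "total:<%d>, affected:<%d> (%.1f%%)" % (counts['total'], counts['actual'], percent)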

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_ambari_alerts.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_ambari_alerts.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_ambari_alerts.py
deleted file mode 100644
index 833a798..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_ambari_alerts.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import json
-import traceback
-
-def main():
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="host", default="localhost", help="NameNode host")
-  parser.add_option("-n", "--name", dest="alert_name", help="Alert name to check")
-  parser.add_option("-f", "--file", dest="alert_file", help="File containing the alert structure")
-
-  (options, args) = parser.parse_args()
-
-  if options.alert_name is None:
-    print "Alert name is required (--name or -n)"
-    exit(-1)
-
-  if options.alert_file is None:
-    print "Alert file is required (--file or -f)"
-    exit(-1)
-
-  if not os.path.exists(options.alert_file):
-    print "Status is unreported"
-    exit(3)
-
-  try:
-    with open(options.alert_file, 'r') as f:
-      data = json.load(f)
-
-      buf_list = []
-      exit_code = 0
-
-      for_hosts = data[options.alert_name]
-      if for_hosts.has_key(options.host):
-        for host_entry in for_hosts[options.host]:
-          buf_list.append(host_entry['text'])
-          alert_state = host_entry['state']
-          if alert_state == 'CRITICAL' and exit_code < 2:
-            exit_code = 2
-          elif alert_state == 'WARNING' and exit_code < 1:
-            exit_code = 1
-
-      if 0 == len(buf_list):
-        print "Status is not reported"
-        exit(3)
-      else:
-        print ", ".join(buf_list)
-        exit(exit_code)
-      
-  except Exception:
-    traceback.print_exc()
-    exit(3)
-
-if __name__ == "__main__":
-  main()
-
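
The layout of the --file JSON is only implicit in the loop above; here is a hypothetical example of the structure it expects, written as a Python literal (alert name, host, and texts invented for illustration):

# { "<alert name>": { "<host>": [ {"state": ..., "text": ...}, ... ] } }
data = {
    "datanode_process": {
        "host1.example.com": [
            {"state": "CRITICAL", "text": "Connection refused on port 50010"},
            {"state": "OK", "text": "TCP OK - 0.001 second response time"},
        ]
    }
}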

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_checkpoint_time.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_checkpoint_time.py
deleted file mode 100644
index 04e8d60..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_checkpoint_time.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import time
-import urllib2
-import json
-
-CRIT_MESSAGE = "CRITICAL: Last checkpoint time is below acceptable. Checkpoint was done {h}h. {m}m. ago"
-WARNING_MESSAGE = "WARNING: Last checkpoint time is below acceptable. Checkpoint was done {h}h. {m}m. ago"
-OK_MESSAGE = "OK: Last checkpoint time"
-WARNING_JMX_MESSAGE = "WARNING: NameNode JMX not accessible"
-
-def main():
-  current_time = int(round(time.time() * 1000))
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="host",
-                    default="localhost", help="NameNode host")
-  parser.add_option("-p", "--port", dest="port",
-                    default="50070", help="NameNode jmx port")
-  parser.add_option("-s", "--ssl-enabled", dest="is_ssl_enabled",
-                    default=False, help="SSL Enabled")  
-  parser.add_option("-w", "--warning", dest="warning",
-                    default="200", help="Percent for warning alert")
-  parser.add_option("-c", "--critical", dest="crit",
-                    default="200", help="Percent for critical alert")
-  parser.add_option("-t", "--period", dest="period",
-                    default="21600", help="Period time")
-  parser.add_option("-x", "--txns", dest="txns",
-                    default="1000000",
-                    help="CheckpointNode will create a checkpoint of the namespace every 'dfs.namenode.checkpoint.txns'")
-  
-  (options, args) = parser.parse_args()
-
-  scheme = "http"
-  if options.is_ssl_enabled == "true":
-    scheme = "https"
-
-  host = get_available_nn_host(options,scheme)
-
-  last_checkpoint_time_qry = "{scheme}://{host}:{port}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(
-      scheme=scheme, host=host, port=options.port)
-
-  # print last_checkpoint_time_qry  # debug only; Nagios reads the first output line as the status
-    
-  last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,"LastCheckpointTime"))
-
-  journal_transaction_info_qry = "{scheme}://{host}:{port}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(
-      scheme=scheme, host=host, port=options.port)
-  
-  journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,"JournalTransactionInfo")
-  journal_transaction_info_dict = json.loads(journal_transaction_info)
-
-  last_txid = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
-  most_txid = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
-
-  delta = (current_time - last_checkpoint_time)/1000
-
-  if ((last_txid - most_txid) > int(options.txns)) and (float(delta) / int(options.period)*100 >= int(options.crit)):
-    print CRIT_MESSAGE.format(h=get_time(delta)['h'], m=get_time(delta)['m'])
-    exit(2)
-  elif ((last_txid - most_txid) > int(options.txns)) and (float(delta) / int(options.period)*100 >= int(options.warning)):
-    print WARNING_MESSAGE.format(h=get_time(delta)['h'], m=get_time(delta)['m'])
-    exit(1)
-  else:
-    print OK_MESSAGE
-    exit(0)
-
-
-def get_time(delta):
-  h = int(delta/3600)
-  m = int((delta % 3600)/60)
-  return {'h':h, 'm':m}
-
-
-def get_value_from_jmx(qry, property):
-  try:
-    response = urllib2.urlopen(qry)
-    data=response.read()
-  except Exception:
-    print WARNING_JMX_MESSAGE
-    exit(1)
-
-  data_dict = json.loads(data)
-  return data_dict["beans"][0][property]
-
-
-def get_available_nn_host(options, scheme):
-  nn_hosts = options.host.split(" ")
-  for nn_host in nn_hosts:
-    try:
-      urllib2.urlopen("{scheme}://{host}:{port}/jmx".format(scheme=scheme, host=nn_host, port=options.port))
-      return nn_host
-    except Exception:
-      pass
-  print WARNING_JMX_MESSAGE
-  exit(1)
-
-
-if __name__ == "__main__":
-  main()
\ No newline at end of file
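
The alert condition above has two parts: the transaction backlog must exceed -x, and the time since the last checkpoint, expressed as a percentage of the -t period, must cross the threshold. A small worked example with invented numbers:

period  = 21600      # -t: expected checkpoint period, seconds
txns    = 1000000    # -x: dfs.namenode.checkpoint.txns
crit    = 200        # -c: percent of the period
delta   = 54000      # seconds since LastCheckpointTime
backlog = 1200000    # LastAppliedOrWrittenTxId - MostRecentCheckpointTxId

elapsed_percent = float(delta) / period * 100    # 250.0 in this example
# CRITICAL only when BOTH thresholds are exceeded:
if backlog > txns and elapsed_percent >= crit:
    print "CRITICAL: checkpoint overdue (%.0f%% of period)" % elapsed_percent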

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.php
deleted file mode 100644
index 0744e38..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-
-    $cpu_load = $object['SystemCpuLoad'];
-
-    if (!isset($object['SystemCpuLoad']) || $cpu_load < 0.0) {
-      echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-      exit(1);
-    }
-
-    $cpu_count = $object['AvailableProcessors'];
-
-    $cpu_percent = $cpu_load*100;
-  }
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>
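
The PHP plugin's JMX round trip, minus the kinit/SPNEGO handling, fits in a few lines of Python 2; a sketch with hypothetical host, port, and thresholds:

import json
import urllib2

host, port = "nn1.example.com", 50070   # hypothetical
warn, crit = 80, 90                     # percent thresholds

url = "http://%s:%s/jmx?qry=java.lang:type=OperatingSystem" % (host, port)
bean = json.loads(urllib2.urlopen(url).read())['beans'][0]

cpu_percent = bean['SystemCpuLoad'] * 100   # JMX reports a 0..1 fraction
cpu_count   = bean['AvailableProcessors']

msg = "%d CPU, load %.1f%%" % (cpu_count, cpu_percent)
if cpu_percent > crit:
    print msg + " > %d%% : CRITICAL" % crit
elif cpu_percent > warn:
    print msg + " > %d%% : WARNING" % warn
else:
    print msg + " < %d%% : OK" % warn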

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu_ha.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu_ha.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu_ha.php
deleted file mode 100644
index 91a7c64..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_cpu_ha.php
+++ /dev/null
@@ -1,116 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:k:r:t:u:e");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  $jmx_response_available = false;
-  $jmx_response = null;
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=java.lang:type=OperatingSystem",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-
-    $object = $json_array['beans'][0];
-
-    if (count($object) > 0) {
-      $jmx_response_available = true;
-      $jmx_response = $object;
-    }
-  }
-
-  if ($jmx_response_available === false) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-
-  $cpu_load = $jmx_response['SystemCpuLoad'];
-
-  if (!isset($jmx_response['SystemCpuLoad']) || $cpu_load < 0.0) {
-    echo "WARNING: Data unavailable, SystemCpuLoad is not set\n";
-    exit(1);
-  }
-
-  $cpu_count = $jmx_response['AvailableProcessors'];
-
-  $cpu_percent = $cpu_load*100;
-
-  $out_msg = $cpu_count . " CPU, load " . number_format($cpu_percent, 1, '.', '') . '%';
-
-  if ($cpu_percent > $crit) {
-    echo $out_msg . ' > ' . $crit . "% : CRITICAL\n";
-    exit(2);
-  }
-  if ($cpu_percent > $warn) {
-    echo $out_msg . ' > ' . $warn . "% : WARNING\n";
-    exit(1);
-  }
-
-  echo $out_msg . ' < ' . $warn . "% : OK\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab_path -r principal_name -t kinit_path -u security_enabled -e ssl_enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the DataNode, gets the JMX JSON document and
- * checks the storage capacity remaining on the local DataNode storage.
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-  $cap_remain = $object['Remaining']; /* remaining capacity, excluding any external files created in data directories by non-hadoop apps */
-  $cap_total = $object['Capacity']; /* capacity of all data partitions minus space reserved for M/R */
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 3693aa0..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,102 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the NameNode, gets the JMX JSON document and
- * checks whether the corrupt or missing block % is > threshold.
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $object = $json_array['beans'][0];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-    $missing_blocks = $object['MissingBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if($total_blocks == 0) {
-      $m_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > 0) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -s <namenode bean name> -k keytab path -r principal name -t kinit path -u security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the NameNode, gets the JMX JSON document and
- * checks whether the % HDFS capacity used >= warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
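
Note that the plugin derives the total as CapacityUsed + CapacityRemaining rather than reading a total directly. A short sketch of the same arithmetic with sample byte counts (values invented):

capacity_used      = 120 * 1024**3     # CapacityUsed from JMX
capacity_remaining = 280 * 1024**3     # CapacityRemaining from JMX
capacity_total     = capacity_used + capacity_remaining

percent = 0 if capacity_total == 0 else capacity_used * 100.0 / capacity_total
print "DFSUsedGB:<%.1f>, DFSTotalGB:<%.1f>, %.1f%% used" % (
    capacity_used / 1024.0**3, capacity_total / 1024.0**3, percent)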

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_thrift_port.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_thrift_port.py
deleted file mode 100644
index c9414f7..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hive_thrift_port.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import os
-import optparse
-import json
-import traceback
-from resource_management import *
-from time import time
-
-
-OK_MESSAGE = "TCP OK - %.3f second response time on port %s"
-CRITICAL_MESSAGE = "Connection to %s on port %s failed"
-
-def main():
-
-  parser = optparse.OptionParser()
-
-  parser.add_option("-H", "--host", dest="address", help="Hive thrift host")
-  parser.add_option("-p", "--port", type="int", dest="port", help="Hive thrift port")
-  parser.add_option("--security-enabled", action="store_true", dest="security_enabled")
-
-  (options, args) = parser.parse_args()
-
-  if options.address is None:
-    print "Specify hive thrift host (--host or -H)"
-    exit(-1)
-
-  if options.port is None:
-    print "Specify hive thrift port (--port or -p)"
-    exit(-1)
-
-  if options.security_enabled:
-    security_enabled = options.security_enabled
-  else:
-    security_enabled = False
-
-  address = options.address
-  port = options.port
-
-  starttime = time()
-  if check_thrift_port_sasl(address, port, security_enabled=security_enabled):
-    timetaken = time() - starttime
-    print OK_MESSAGE % (timetaken, port)
-    exit(0)
-  else:
-    print CRITICAL_MESSAGE % (address, port)
-    exit(2)
-
-
-if __name__ == "__main__":
-  main()
-
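
check_thrift_port_sasl() comes from Ambari's resource_management library. For the unsecured case it amounts to a timed TCP connect, which the following plain-socket sketch approximates (hypothetical address and port; the SASL handshake is not modeled):

import socket
from time import time

address, port = "hive.example.com", 10000   # hypothetical

start = time()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
    s.connect((address, port))   # plain TCP connect; no SASL negotiation
    print "TCP OK - %.3f second response time on port %s" % (time() - start, port)
except socket.error:
    print "Connection to %s on port %s failed" % (address, port)
finally:
    s.close()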

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 3f9243a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0
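
The same check can be done without parsing df output; a Python sketch using os.statvfs (directory list and threshold are hypothetical, and the percentage approximates df's Use% column):

import os

mapred_local_dirs = "/grid/0/mapred,/grid/1/mapred"   # hypothetical
critical = 90                                         # percent

for d in mapred_local_dirs.split(","):
    st = os.statvfs(d)
    # Approximate df's Use%: share of blocks not available to callers.
    used_percent = 100 - (st.f_bavail * 100.0 / st.f_blocks)
    if used_percent >= critical:
        print "CRITICAL: MapReduce local dir is full."
        raise SystemExit(2)
print "OK: MapReduce local dir space is available."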

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the NameNode, gets the JMX JSON document and
- * checks NameDirStatuses for any offline (failed) directories.
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file
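
The wrinkle in this plugin is that NameDirStatuses is itself a JSON string embedded inside the JMX JSON, so it has to be decoded twice. A Python sketch of that double decode (hypothetical NameNode host; no security handling):

import json
import urllib2

url = "http://nn1.example.com:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"
bean = json.loads(urllib2.urlopen(url).read())['beans'][0]

# NameDirStatuses is a JSON string within the JMX JSON; decode it again.
statuses = json.loads(bean['NameDirStatuses'])
failed = statuses.get('failed', {})
if failed:
    print "CRITICAL: Offline NameNode directories: " + \
          ", ".join("%s:%s" % kv for kv in failed.items())
else:
    print "OK: All NameNode directories are active"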

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 83c1aca..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  export no_proxy=$nn
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2
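
The classification logic in the script above, restated as a Python sketch: query each NameNode's FSNamesystem bean, bucket it by tag.HAState, and require exactly one active plus at least one standby (hosts are hypothetical):

import json
import urllib2

namenodes = ["nn1.example.com", "nn2.example.com"]   # hypothetical
port = 50070
states = {'active': [], 'standby': [], 'unavailable': []}

for nn in namenodes:
    try:
        url = "http://%s:%s/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem" % (nn, port)
        bean = json.loads(urllib2.urlopen(url, timeout=5).read())['beans'][0]
        state = bean.get('tag.HAState')
        if state in states:
            states[state].append(nn)
        else:
            states['unavailable'].append(nn)
    except Exception:
        states['unavailable'].append(nn)

# Healthy HA means exactly one active and at least one standby NameNode.
if len(states['active']) == 1 and states['standby']:
    print "OK: NameNode HA healthy; " + str(states)
else:
    print "CRITICAL: " + str(states)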

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index eedcd62..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-export no_proxy=$HOST
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;
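
The probe above leans on the YARN REST API: GET /ws/v1/node/info returns a JSON document whose nodeInfo.nodeHealthy flag the script detects with a substring match. A hedged Python equivalent that parses the JSON instead (assumes the unsecured endpoint; the Kerberos branch above would additionally need SPNEGO support):

    import json
    import urllib.request

    def nodemanager_healthy(host, port, timeout=5):
        """True when the NodeManager reports nodeHealthy over its REST API."""
        url = "http://%s:%s/ws/v1/node/info" % (host, port)
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            info = json.load(resp)
        return bool(info.get("nodeInfo", {}).get("nodeHealthy"))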

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;
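
'oozie admin -oozie <url> -status' prints the server's system mode (NORMAL when healthy) and exits non-zero on failure, which is all the script needs. The same call driven from Python, as a sketch (assumes the oozie client binary and a valid JAVA_HOME on the monitoring host, just as the script does):

    import os
    import subprocess

    def oozie_status(host, port, java_home):
        """Run 'oozie admin -status' and return (ok, combined_output)."""
        oozie_url = "http://%s:%s/oozie" % (host.lower(), port)
        proc = subprocess.run(
            ["oozie", "admin", "-oozie", oozie_url, "-status"],
            env=dict(os.environ, JAVA_HOME=java_home),
            capture_output=True, text=True)
        return proc.returncode == 0, (proc.stdout + proc.stderr).strip()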

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the master node and fetches the jmx-json document.
- * It checks the RPC wait time in the queue, RpcQueueTime_avg_time.
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical thresholds are in seconds.
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
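
The plugin reads RpcQueueTime_avg_time (plus RpcProcessingTime_avg_time for the message text) from the RpcActivityForPort JMX bean and compares only the queue time against -w/-c. The threshold logic, restated as a compact Python sketch (hypothetical; unauthenticated /jmx assumed):

    import json
    import urllib.request

    def check_rpcq_latency(host, port, service, warn, crit, timeout=5):
        """Nagios-style (exit_code, message) for RPC queue latency."""
        url = ("http://%s:%s/jmx?qry=Hadoop:service=%s,name=RpcActivityForPort*"
               % (host, port, service))
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            bean = json.load(resp)["beans"][0]
        queue_t = round(bean["RpcQueueTime_avg_time"], 2)
        proc_t = round(bean["RpcProcessingTime_avg_time"], 2)
        msg = ("RpcQueueTime_avg_time:<%s> Secs, "
               "RpcProcessingTime_avg_time:<%s> Secs" % (queue_t, proc_t))
        if queue_t >= crit:
            return 2, "CRITICAL: " + msg
        if queue_t >= warn:
            return 1, "WARNING: " + msg
        return 0, "OK: " + msg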

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency_ha.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency_ha.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency_ha.php
deleted file mode 100644
index 3e7616c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_rpcq_latency_ha.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the master node and fetches the jmx-json document.
- * It checks the RPC wait time in the queue, RpcQueueTime_avg_time.
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical thresholds are in seconds.
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  $jmx_response_available = false;
-  $jmx_response;
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $object = $json_array['beans'][0];
-
-    if (count($object) > 0) {
-      $jmx_response_available = true;
-      $jmx_response = $object;
-    }
-  }
-
-  if ($jmx_response_available === false) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }
-
-  $RpcQueueTime_avg_time = round($jmx_response['RpcQueueTime_avg_time'], 2);
-  $RpcProcessingTime_avg_time = round($jmx_response['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
\ No newline at end of file
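
Functionally this HA variant differs from check_rpcq_latency.php only in that -h takes a comma-separated host list: the loop keeps the JMX bean from the last host that answered and goes CRITICAL only when none of them returned data.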

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 3e2ba0f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-export no_proxy=$HOST
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;
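
The WebHCat (Templeton) check appends the HTTP status to curl's body via -w, then requires both "status":"ok" in the payload and a trailing <status_code:200>. The same assertion, parsed rather than pattern-matched, as a Python sketch (unsecured endpoint assumed):

    import json
    import urllib.request

    def webhcat_ok(host, port, version="v1", timeout=5):
        """True when /templeton/<version>/status answers 200 with status ok."""
        url = "http://%s:%s/templeton/%s/status" % (host, port, version)
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            if resp.status != 200:
                return False
            return json.load(resp).get("status") == "ok"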

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index 7044878..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-service=$1
-host=$2
-port=$3
-
-checkurl () {
-  url=$1
-  export no_proxy=$host
-  curl $url -k -o /dev/null
-  echo $?
-}
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-storm_ui)
-    rmweburl="http://$host:$port"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then
-      echo "WARNING: Storm Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-falconserver)
-    hsweburl="http://$host:$port/"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then
-      echo "WARNING: FalconServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2|falconserver|storm_ui]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
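
check_webui.sh is essentially a table from service name to well-known UI path, probed with curl -k. The case statement collapses naturally into a lookup; a hypothetical Python restatement with the same endpoints:

    import urllib.request

    UI_PATHS = {
        "jobtracker": "/",
        "namenode": "/",
        "jobhistory": "/jobhistoryhome.jsp",
        "hbase": "/master-status",
        "resourcemanager": "/cluster",
        "historyserver2": "/jobhistory",
        "storm_ui": "/",
        "falconserver": "/",
    }

    def webui_reachable(service, host, port, timeout=5):
        """True when the service's web UI answers at its well-known path."""
        url = "http://%s:%s%s" % (host, port, UI_PATHS[service])
        try:
            urllib.request.urlopen(url, timeout=timeout)
            return True
        except Exception:
            return False

(The shell version also tolerates https via curl -k; this sketch probes plain http only.)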

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui_ha.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui_ha.sh
deleted file mode 100644
index d9a814d..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_webui_ha.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-service=$1
-hosts=$2
-port=$3
-
-checkurl () {
-  url=$1
-  host=$2
-  export no_proxy=$host
-  curl $url -k -o /dev/null
-  echo $?
-}
-
-if [[ -z "$service" || -z "$hosts" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui_ha.sh service_name, host_name";
-  exit 3;
-fi
-
-case "$service" in
-resourcemanager)
-    url_end_part="/cluster"
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [resourcemanager]"
-   exit 3
-   ;;
-esac
-
-OIFS="$IFS"
-IFS=','
-read -a hosts_array <<< "${hosts}"
-IFS="$OIFS"
-
-for host in "${hosts_array[@]}"
-do
-  weburl="http://${host}:${port}${url_end_part}"
-  if [[ `checkurl "$weburl" "$host"` -eq 0 ]]; then
-    echo "OK: Successfully accessed $service Web UI"
-    exit 0;
-  fi
-done
-
-echo "WARNING: $service Web UI not accessible : $weburl";
-exit 1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
deleted file mode 100644
index 87717d2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_mon_nagios_addons.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-Alias /ambarinagios /usr/share/hdp
-<Directory /usr/share/hdp>
-  Options None
-  AllowOverride None
-  Order allow,deny
-  Allow from all
-</Directory>


[23/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/930d4499
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/930d4499
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/930d4499

Branch: refs/heads/trunk
Commit: 930d449997c3b2fb9754bfb7c59d14f5e2f04164
Parents: a236102
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Sun Aug 16 22:13:19 2015 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Sun Aug 16 22:13:19 2015 -0700

----------------------------------------------------------------------
 .../3.0.0.0/blueprints/multinode-default.json   |   183 -
 .../3.0.0.0/blueprints/singlenode-default.json  |   137 -
 .../PHD/3.0.0.0/configuration/cluster-env.xml   |    56 -
 .../3.0.0.0/hooks/after-INSTALL/scripts/hook.py |    35 -
 .../hooks/after-INSTALL/scripts/params.py       |    73 -
 .../scripts/shared_initialization.py            |    38 -
 .../hooks/before-ANY/files/changeToSecureUid.sh |    50 -
 .../3.0.0.0/hooks/before-ANY/scripts/hook.py    |    35 -
 .../3.0.0.0/hooks/before-ANY/scripts/params.py  |   134 -
 .../before-ANY/scripts/shared_initialization.py |   114 -
 .../hooks/before-INSTALL/scripts/hook.py        |    38 -
 .../hooks/before-INSTALL/scripts/params.py      |   122 -
 .../scripts/repo_initialization.py              |    57 -
 .../scripts/shared_initialization.py            |    63 -
 .../before-INSTALL/templates/repo_suse_rhel.j2  |     7 -
 .../before-INSTALL/templates/repo_ubuntu.j2     |     1 -
 .../hooks/before-RESTART/scripts/hook.py        |    29 -
 .../hooks/before-START/files/checkForFormat.sh  |    65 -
 .../before-START/files/task-log4j.properties    |   134 -
 .../3.0.0.0/hooks/before-START/scripts/hook.py  |    37 -
 .../hooks/before-START/scripts/params.py        |   158 -
 .../scripts/shared_initialization.py            |   177 -
 .../templates/commons-logging.properties.j2     |    43 -
 .../templates/exclude_hosts_list.j2             |    21 -
 .../templates/hadoop-metrics2.properties.j2     |    65 -
 .../before-START/templates/health_check-v2.j2   |    81 -
 .../before-START/templates/health_check.j2      |   109 -
 .../templates/include_hosts_list.j2             |    21 -
 .../resources/stacks/PHD/3.0.0.0/metainfo.xml   |    22 -
 .../stacks/PHD/3.0.0.0/repos/repoinfo.xml       |    33 -
 .../stacks/PHD/3.0.0.0/role_command_order.json  |    75 -
 .../services/FLUME/configuration/flume-conf.xml |    31 -
 .../services/FLUME/configuration/flume-env.xml  |    78 -
 .../FLUME/configuration/flume-log4j.xml         |    31 -
 .../PHD/3.0.0.0/services/FLUME/metainfo.xml     |    69 -
 .../PHD/3.0.0.0/services/FLUME/metrics.json     |   720 -
 .../services/FLUME/package/scripts/flume.py     |   255 -
 .../FLUME/package/scripts/flume_check.py        |    40 -
 .../FLUME/package/scripts/flume_handler.py      |   121 -
 .../services/FLUME/package/scripts/params.py    |    70 -
 .../FLUME/package/templates/flume.conf.j2       |    24 -
 .../FLUME/package/templates/log4j.properties.j2 |    67 -
 .../GANGLIA/configuration/ganglia-env.xml       |    77 -
 .../PHD/3.0.0.0/services/GANGLIA/metainfo.xml   |   127 -
 .../GANGLIA/package/files/checkGmetad.sh        |    37 -
 .../GANGLIA/package/files/checkGmond.sh         |    62 -
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 -
 .../services/GANGLIA/package/files/gmetad.init  |    73 -
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 -
 .../services/GANGLIA/package/files/gmond.init   |    73 -
 .../services/GANGLIA/package/files/gmondLib.sh  |   539 -
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 -
 .../GANGLIA/package/files/setupGanglia.sh       |   141 -
 .../GANGLIA/package/files/startGmetad.sh        |    68 -
 .../GANGLIA/package/files/startGmond.sh         |    85 -
 .../GANGLIA/package/files/startRrdcached.sh     |    79 -
 .../GANGLIA/package/files/stopGmetad.sh         |    43 -
 .../services/GANGLIA/package/files/stopGmond.sh |    54 -
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 -
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 -
 .../GANGLIA/package/scripts/functions.py        |    31 -
 .../services/GANGLIA/package/scripts/ganglia.py |    97 -
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   236 -
 .../package/scripts/ganglia_monitor_service.py  |    27 -
 .../GANGLIA/package/scripts/ganglia_server.py   |   119 -
 .../package/scripts/ganglia_server_service.py   |    27 -
 .../services/GANGLIA/package/scripts/params.py  |   160 -
 .../GANGLIA/package/scripts/status_params.py    |    25 -
 .../GANGLIA/package/templates/ganglia.conf.j2   |    34 -
 .../package/templates/gangliaClusters.conf.j2   |    43 -
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    46 -
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    85 -
 .../GANGLIA/package/templates/rrd.py.j2         |   361 -
 .../services/HBASE/configuration/hbase-env.xml  |   137 -
 .../HBASE/configuration/hbase-log4j.xml         |   143 -
 .../HBASE/configuration/hbase-policy.xml        |    53 -
 .../services/HBASE/configuration/hbase-site.xml |   331 -
 .../PHD/3.0.0.0/services/HBASE/metainfo.xml     |   139 -
 .../PHD/3.0.0.0/services/HBASE/metrics.json     | 13655 -----------------
 .../HBASE/package/files/draining_servers.rb     |   164 -
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    34 -
 .../services/HBASE/package/scripts/__init__.py  |    19 -
 .../services/HBASE/package/scripts/functions.py |    40 -
 .../services/HBASE/package/scripts/hbase.py     |   144 -
 .../HBASE/package/scripts/hbase_client.py       |    43 -
 .../HBASE/package/scripts/hbase_decommission.py |    74 -
 .../HBASE/package/scripts/hbase_master.py       |    70 -
 .../HBASE/package/scripts/hbase_regionserver.py |    66 -
 .../HBASE/package/scripts/hbase_service.py      |    51 -
 .../services/HBASE/package/scripts/params.py    |   137 -
 .../HBASE/package/scripts/service_check.py      |    79 -
 .../HBASE/package/scripts/status_params.py      |    26 -
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    81 -
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    80 -
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    44 -
 .../package/templates/hbase_client_jaas.conf.j2 |    23 -
 .../templates/hbase_grant_permissions.j2        |    39 -
 .../package/templates/hbase_master_jaas.conf.j2 |    26 -
 .../templates/hbase_regionserver_jaas.conf.j2   |    26 -
 .../HBASE/package/templates/regionservers.j2    |    20 -
 .../services/HDFS/configuration/core-site.xml   |   180 -
 .../services/HDFS/configuration/hadoop-env.xml  |   200 -
 .../HDFS/configuration/hadoop-policy.xml        |   134 -
 .../services/HDFS/configuration/hdfs-log4j.xml  |   201 -
 .../services/HDFS/configuration/hdfs-site.xml   |   430 -
 .../PHD/3.0.0.0/services/HDFS/metainfo.xml      |   226 -
 .../PHD/3.0.0.0/services/HDFS/metrics.json      |  7860 ----------
 .../HDFS/package/files/checkForFormat.sh        |    70 -
 .../services/HDFS/package/files/checkWebUI.py   |    53 -
 .../scripts/balancer-emulator/balancer-err.log  |  1032 --
 .../scripts/balancer-emulator/balancer.log      |    29 -
 .../scripts/balancer-emulator/hdfs-command.py   |    45 -
 .../services/HDFS/package/scripts/datanode.py   |    59 -
 .../services/HDFS/package/scripts/hdfs.py       |    80 -
 .../HDFS/package/scripts/hdfs_client.py         |    53 -
 .../HDFS/package/scripts/hdfs_datanode.py       |    56 -
 .../HDFS/package/scripts/hdfs_namenode.py       |   160 -
 .../HDFS/package/scripts/hdfs_rebalance.py      |   130 -
 .../HDFS/package/scripts/hdfs_snamenode.py      |    45 -
 .../HDFS/package/scripts/journalnode.py         |    73 -
 .../services/HDFS/package/scripts/namenode.py   |   134 -
 .../services/HDFS/package/scripts/params.py     |   235 -
 .../HDFS/package/scripts/service_check.py       |   120 -
 .../services/HDFS/package/scripts/snamenode.py  |    65 -
 .../HDFS/package/scripts/status_params.py       |    31 -
 .../services/HDFS/package/scripts/utils.py      |   149 -
 .../services/HDFS/package/scripts/zkfc_slave.py |    64 -
 .../package/templates/exclude_hosts_list.j2     |    21 -
 .../HDFS/package/templates/hdfs.conf.j2         |    35 -
 .../services/HDFS/package/templates/slaves.j2   |    21 -
 .../services/HIVE/configuration/hcat-env.xml    |    57 -
 .../services/HIVE/configuration/hive-env.xml    |   134 -
 .../HIVE/configuration/hive-exec-log4j.xml      |   111 -
 .../services/HIVE/configuration/hive-log4j.xml  |   120 -
 .../services/HIVE/configuration/hive-site.xml   |   538 -
 .../services/HIVE/configuration/webhcat-env.xml |    54 -
 .../HIVE/configuration/webhcat-site.xml         |   138 -
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |   777 -
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |   718 -
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    |  1406 --
 .../PHD/3.0.0.0/services/HIVE/metainfo.xml      |   280 -
 .../services/HIVE/package/files/addMysqlUser.sh |    41 -
 .../HIVE/package/files/addPostgreSQLUser.sh     |    44 -
 .../services/HIVE/package/files/hcatSmoke.sh    |    36 -
 .../services/HIVE/package/files/hiveSmoke.sh    |    24 -
 .../services/HIVE/package/files/hiveserver2.sql |    23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |    32 -
 .../services/HIVE/package/files/pigSmoke.sh     |    18 -
 .../HIVE/package/files/startMetastore.sh        |    23 -
 .../HIVE/package/files/templetonSmoke.sh        |    96 -
 .../services/HIVE/package/scripts/__init__.py   |    19 -
 .../services/HIVE/package/scripts/hcat.py       |    58 -
 .../HIVE/package/scripts/hcat_client.py         |    43 -
 .../HIVE/package/scripts/hcat_service_check.py  |    80 -
 .../services/HIVE/package/scripts/hive.py       |   216 -
 .../HIVE/package/scripts/hive_client.py         |    42 -
 .../HIVE/package/scripts/hive_metastore.py      |    64 -
 .../HIVE/package/scripts/hive_server.py         |    66 -
 .../HIVE/package/scripts/hive_service.py        |   106 -
 .../HIVE/package/scripts/install_jars.py        |   108 -
 .../HIVE/package/scripts/mysql_server.py        |    70 -
 .../HIVE/package/scripts/mysql_service.py       |    46 -
 .../services/HIVE/package/scripts/params.py     |   283 -
 .../HIVE/package/scripts/postgresql_server.py   |   113 -
 .../HIVE/package/scripts/postgresql_service.py  |    41 -
 .../HIVE/package/scripts/service_check.py       |    46 -
 .../HIVE/package/scripts/status_params.py       |    38 -
 .../services/HIVE/package/scripts/webhcat.py    |   131 -
 .../HIVE/package/scripts/webhcat_server.py      |    53 -
 .../HIVE/package/scripts/webhcat_service.py     |    40 -
 .../package/scripts/webhcat_service_check.py    |    41 -
 .../package/templates/startHiveserver2.sh.j2    |    29 -
 .../NAGIOS/configuration/nagios-env.xml         |    53 -
 .../PHD/3.0.0.0/services/NAGIOS/metainfo.xml    |   163 -
 .../NAGIOS/package/files/check_aggregate.php    |   248 -
 .../NAGIOS/package/files/check_ambari_alerts.py |    80 -
 .../package/files/check_checkpoint_time.py      |   123 -
 .../services/NAGIOS/package/files/check_cpu.php |   109 -
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 -
 .../NAGIOS/package/files/check_cpu_ha.php       |   116 -
 .../package/files/check_datanode_storage.php    |   100 -
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   102 -
 .../package/files/check_hdfs_capacity.php       |   109 -
 .../files/check_hive_metastore_status.sh        |    45 -
 .../package/files/check_hive_thrift_port.py     |    72 -
 .../NAGIOS/package/files/check_hue_status.sh    |    31 -
 .../files/check_mapred_local_dir_used.sh        |    34 -
 .../package/files/check_name_dir_status.php     |    93 -
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    83 -
 .../package/files/check_nodemanager_health.sh   |    45 -
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 -
 .../package/files/check_rpcq_latency_ha.php     |   115 -
 .../package/files/check_templeton_status.sh     |    46 -
 .../NAGIOS/package/files/check_webui.sh         |   103 -
 .../NAGIOS/package/files/check_webui_ha.sh      |    64 -
 .../package/files/hdp_mon_nagios_addons.conf    |    24 -
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 -
 .../services/NAGIOS/package/files/mm_wrapper.py |   326 -
 .../NAGIOS/package/files/nagios_alerts.php      |   513 -
 .../services/NAGIOS/package/files/sys_logger.py |   197 -
 .../NAGIOS/package/scripts/functions.py         |    47 -
 .../services/NAGIOS/package/scripts/nagios.py   |   109 -
 .../NAGIOS/package/scripts/nagios_server.py     |   111 -
 .../package/scripts/nagios_server_config.py     |    99 -
 .../NAGIOS/package/scripts/nagios_service.py    |   103 -
 .../services/NAGIOS/package/scripts/params.py   |   366 -
 .../NAGIOS/package/scripts/status_params.py     |    29 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |   109 -
 .../package/templates/hadoop-commands.cfg.j2    |   166 -
 .../package/templates/hadoop-hostgroups.cfg.j2  |    33 -
 .../package/templates/hadoop-hosts.cfg.j2       |    53 -
 .../templates/hadoop-servicegroups.cfg.j2       |   113 -
 .../package/templates/hadoop-services.cfg.j2    |   791 -
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1365 --
 .../NAGIOS/package/templates/nagios.conf.j2     |    84 -
 .../services/NAGIOS/package/templates/nagios.j2 |   164 -
 .../NAGIOS/package/templates/resource.cfg.j2    |    51 -
 .../services/OOZIE/configuration/oozie-env.xml  |   129 -
 .../OOZIE/configuration/oozie-log4j.xml         |    97 -
 .../services/OOZIE/configuration/oozie-site.xml |   312 -
 .../PHD/3.0.0.0/services/OOZIE/metainfo.xml     |   154 -
 .../services/OOZIE/package/files/oozieSmoke2.sh |   112 -
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 -
 .../services/OOZIE/package/scripts/oozie.py     |   152 -
 .../OOZIE/package/scripts/oozie_client.py       |    43 -
 .../OOZIE/package/scripts/oozie_server.py       |    56 -
 .../OOZIE/package/scripts/oozie_service.py      |    74 -
 .../services/OOZIE/package/scripts/params.py    |   164 -
 .../OOZIE/package/scripts/service_check.py      |    60 -
 .../OOZIE/package/scripts/status_params.py      |    26 -
 .../package/templates/catalina.properties.j2    |    81 -
 .../package/templates/oozie-log4j.properties.j2 |    92 -
 .../services/PIG/configuration/pig-env.xml      |    38 -
 .../services/PIG/configuration/pig-log4j.xml    |    62 -
 .../PIG/configuration/pig-properties.xml        |    92 -
 .../PHD/3.0.0.0/services/PIG/metainfo.xml       |    85 -
 .../services/PIG/package/files/pigSmoke.sh      |    18 -
 .../services/PIG/package/scripts/params.py      |    57 -
 .../3.0.0.0/services/PIG/package/scripts/pig.py |    59 -
 .../services/PIG/package/scripts/pig_client.py  |    41 -
 .../PIG/package/scripts/service_check.py        |    69 -
 .../YARN/configuration-mapred/mapred-env.xml    |    65 -
 .../YARN/configuration-mapred/mapred-site.xml   |   360 -
 .../YARN/configuration/capacity-scheduler.xml   |   132 -
 .../services/YARN/configuration/yarn-env.xml    |   184 -
 .../services/YARN/configuration/yarn-log4j.xml  |    71 -
 .../services/YARN/configuration/yarn-site.xml   |   413 -
 .../PHD/3.0.0.0/services/YARN/metainfo.xml      |   249 -
 .../PHD/3.0.0.0/services/YARN/metrics.json      |  5354 -------
 .../files/validateYarnComponentStatus.py        |   170 -
 .../services/YARN/package/scripts/__init__.py   |    20 -
 .../scripts/application_timeline_server.py      |    57 -
 .../YARN/package/scripts/historyserver.py       |    53 -
 .../package/scripts/mapred_service_check.py     |    80 -
 .../YARN/package/scripts/mapreduce2_client.py   |    42 -
 .../YARN/package/scripts/nodemanager.py         |    59 -
 .../services/YARN/package/scripts/params.py     |   174 -
 .../YARN/package/scripts/resourcemanager.py     |   101 -
 .../services/YARN/package/scripts/service.py    |    75 -
 .../YARN/package/scripts/service_check.py       |    68 -
 .../YARN/package/scripts/status_params.py       |    36 -
 .../services/YARN/package/scripts/yarn.py       |   238 -
 .../YARN/package/scripts/yarn_client.py         |    42 -
 .../package/templates/container-executor.cfg.j2 |    40 -
 .../package/templates/exclude_hosts_list.j2     |    21 -
 .../YARN/package/templates/mapreduce.conf.j2    |    35 -
 .../package/templates/taskcontroller.cfg.j2     |    38 -
 .../YARN/package/templates/yarn.conf.j2         |    35 -
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |    85 -
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |   101 -
 .../PHD/3.0.0.0/services/ZOOKEEPER/metainfo.xml |    89 -
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 -
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 -
 .../ZOOKEEPER/package/files/zkService.sh        |    26 -
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 -
 .../ZOOKEEPER/package/scripts/__init__.py       |    20 -
 .../ZOOKEEPER/package/scripts/params.py         |    86 -
 .../ZOOKEEPER/package/scripts/service_check.py  |    46 -
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 -
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   110 -
 .../package/scripts/zookeeper_client.py         |    42 -
 .../package/scripts/zookeeper_server.py         |    54 -
 .../package/scripts/zookeeper_service.py        |    42 -
 .../package/templates/configuration.xsl.j2      |    42 -
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    69 -
 .../templates/zookeeper_client_jaas.conf.j2     |    23 -
 .../package/templates/zookeeper_jaas.conf.j2    |    26 -
 .../PHD/3.0.0.0/services/stack_advisor.py       |   443 -
 289 files changed, 60031 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/multinode-default.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/multinode-default.json
deleted file mode 100644
index 0c871c2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/multinode-default.json
+++ /dev/null
@@ -1,183 +0,0 @@
-{
-    "Blueprints": {
-        "stack_name": "PHD", 
-        "stack_version": "3.0.0.0", 
-        "blueprint_name": "blueprint-multinode-default"
-    }, 
-    "host_groups": [
-        {
-            "cardinality": "1", 
-            "name": "master_1", 
-            "components": [
-                {
-                    "name": "NAMENODE"
-                }, 
-                {
-                    "name": "ZOOKEEPER_SERVER"
-                }, 
-                {
-                    "name": "HBASE_MASTER"
-                }, 
-                {
-                    "name": "GANGLIA_SERVER"
-                }, 
-                {
-                    "name": "HDFS_CLIENT"
-                }, 
-                {
-                    "name": "YARN_CLIENT"
-                }, 
-                {
-                    "name": "HCAT"
-                }, 
-                {
-                    "name": "GANGLIA_MONITOR"
-                }
-            ]
-        }, 
-        {
-            "cardinality": "1", 
-            "name": "master_2", 
-            "components": [
-                {
-                    "name": "ZOOKEEPER_CLIENT"
-                }, 
-                {
-                    "name": "HISTORYSERVER"
-                }, 
-                {
-                    "name": "HIVE_SERVER"
-                }, 
-                {
-                    "name": "SECONDARY_NAMENODE"
-                }, 
-                {
-                    "name": "HIVE_METASTORE"
-                }, 
-                {
-                    "name": "HDFS_CLIENT"
-                }, 
-                {
-                    "name": "HIVE_CLIENT"
-                }, 
-                {
-                    "name": "YARN_CLIENT"
-                }, 
-                {
-                    "name": "MYSQL_SERVER"
-                }, 
-                {
-                    "name": "POSTGRESQL_SERVER"
-                }, 
-                {
-                    "name": "GANGLIA_MONITOR"
-                }, 
-                {
-                    "name": "WEBHCAT_SERVER"
-                }
-            ]
-        }, 
-        {
-            "cardinality": "1", 
-            "name": "master_3", 
-            "components": [
-                {
-                    "name": "RESOURCEMANAGER"
-                }, 
-                {
-                    "name": "ZOOKEEPER_SERVER"
-                }, 
-                {
-                    "name": "GANGLIA_MONITOR"
-                }
-            ]
-        }, 
-        {
-            "cardinality": "1", 
-            "name": "master_4", 
-            "components": [
-                {
-                    "name": "OOZIE_SERVER"
-                }, 
-                {
-                    "name": "ZOOKEEPER_SERVER"
-                }, 
-                {
-                    "name": "GANGLIA_MONITOR"
-                }
-            ]
-        }, 
-        {
-            "cardinality": "${slavesCount}", 
-            "name": "slave", 
-            "components": [
-                {
-                    "name": "HBASE_REGIONSERVER"
-                }, 
-                {
-                    "name": "NODEMANAGER"
-                }, 
-                {
-                    "name": "DATANODE"
-                }, 
-                {
-                    "name": "GANGLIA_MONITOR"
-                }
-            ]
-        }, 
-        {
-            "cardinality": "1", 
-            "name": "gateway", 
-            "components": [
-                {
-                    "name": "AMBARI_SERVER"
-                }, 
-                {
-                    "name": "NAGIOS_SERVER"
-                }, 
-                {
-                    "name": "ZOOKEEPER_CLIENT"
-                }, 
-                {
-                    "name": "PIG"
-                }, 
-                {
-                    "name": "OOZIE_CLIENT"
-                }, 
-                {
-                    "name": "HBASE_CLIENT"
-                }, 
-                {
-                    "name": "HCAT"
-                }, 
-                {
-                    "name": "SQOOP"
-                }, 
-                {
-                    "name": "HDFS_CLIENT"
-                }, 
-                {
-                    "name": "HIVE_CLIENT"
-                }, 
-                {
-                    "name": "YARN_CLIENT"
-                }, 
-                {
-                    "name": "MAPREDUCE2_CLIENT"
-                }, 
-                {
-                    "name": "GANGLIA_MONITOR"
-                }
-            ]
-        }
-    ], 
-    "configurations": [
-        {
-            "nagios-env": {
-                "nagios_contact": "admin@localhost"
-            }
-        }
-    ]
-}
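
Both blueprints in this commit share the same shape: a Blueprints stanza naming the stack, a host_groups list (components plus cardinality), and optional configurations. As a sketch of how such a file is typically registered (hypothetical helper; the /api/v1/blueprints path, basic-auth header, and X-Requested-By requirement reflect the general Ambari REST convention and are not part of this commit):

    import json
    import urllib.request

    def post_blueprint(ambari_url, name, blueprint_path, user_pass_b64):
        """Validate a blueprint JSON file and POST it to Ambari."""
        with open(blueprint_path) as f:
            blueprint = json.load(f)  # raises ValueError if malformed
        req = urllib.request.Request(
            "%s/api/v1/blueprints/%s" % (ambari_url, name),
            data=json.dumps(blueprint).encode(),
            headers={"Authorization": "Basic " + user_pass_b64,
                     "X-Requested-By": "ambari"},
            method="POST")
        return urllib.request.urlopen(req).status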

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/singlenode-default.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/singlenode-default.json
deleted file mode 100644
index 9e4881a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/blueprints/singlenode-default.json
+++ /dev/null
@@ -1,137 +0,0 @@
-{
-    "Blueprints": {
-        "stack_name": "PHD", 
-        "stack_version": "3.0.0.0", 
-        "blueprint_name": "blueprint-singlenode-default"
-    }, 
-    "host_groups": [
-        {
-            "cardinality": "1", 
-            "name": "host_group_1", 
-            "components": [
-                {
-                    "name": "STORM_REST_API"
-                }, 
-                {
-                    "name": "PIG"
-                }, 
-                {
-                    "name": "HISTORYSERVER"
-                }, 
-                {
-                    "name": "HBASE_REGIONSERVER"
-                }, 
-                {
-                    "name": "OOZIE_CLIENT"
-                }, 
-                {
-                    "name": "HBASE_CLIENT"
-                }, 
-                {
-                    "name": "NAMENODE"
-                }, 
-                {
-                    "name": "SUPERVISOR"
-                }, 
-                {
-                    "name": "FALCON_SERVER"
-                }, 
-                {
-                    "name": "HCAT"
-                }, 
-                {
-                    "name": "AMBARI_SERVER"
-                }, 
-                {
-                    "name": "APP_TIMELINE_SERVER"
-                }, 
-                {
-                    "name": "HDFS_CLIENT"
-                }, 
-                {
-                    "name": "HIVE_CLIENT"
-                }, 
-                {
-                    "name": "NODEMANAGER"
-                }, 
-                {
-                    "name": "DATANODE"
-                }, 
-                {
-                    "name": "WEBHCAT_SERVER"
-                }, 
-                {
-                    "name": "RESOURCEMANAGER"
-                }, 
-                {
-                    "name": "ZOOKEEPER_SERVER"
-                }, 
-                {
-                    "name": "ZOOKEEPER_CLIENT"
-                }, 
-                {
-                    "name": "STORM_UI_SERVER"
-                }, 
-                {
-                    "name": "HBASE_MASTER"
-                }, 
-                {
-                    "name": "HIVE_SERVER"
-                }, 
-                {
-                    "name": "OOZIE_SERVER"
-                }, 
-                {
-                    "name": "FALCON_CLIENT"
-                }, 
-                {
-                    "name": "NAGIOS_SERVER"
-                }, 
-                {
-                    "name": "SECONDARY_NAMENODE"
-                }, 
-                {
-                    "name": "TEZ_CLIENT"
-                }, 
-                {
-                    "name": "HIVE_METASTORE"
-                }, 
-                {
-                    "name": "GANGLIA_SERVER"
-                }, 
-                {
-                    "name": "SQOOP"
-                }, 
-                {
-                    "name": "YARN_CLIENT"
-                }, 
-                {
-                    "name": "MAPREDUCE2_CLIENT"
-                }, 
-                {
-                    "name": "MYSQL_SERVER"
-                }, 
-                {
-                    "name": "POSTGRESQL_SERVER"
-                }, 
-                {
-                    "name": "GANGLIA_MONITOR"
-                }, 
-                {
-                    "name": "DRPC_SERVER"
-                }, 
-                {
-                    "name": "NIMBUS"
-                }
-            ]
-        }
-    ], 
-    "configurations": [
-        {
-            "nagios-env": {
-                "nagios_contact": "admin@localhost"
-            }
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/configuration/cluster-env.xml
deleted file mode 100644
index d41ff98..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/configuration/cluster-env.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-    <property>
-        <name>security_enabled</name>
-        <value>false</value>
-        <description>Hadoop Security</description>
-    </property>
-    <property>
-        <name>kerberos_domain</name>
-        <value>EXAMPLE.COM</value>
-        <description>Kerberos realm.</description>
-    </property>
-    <property>
-        <name>ignore_groupsusers_create</name>
-        <value>false</value>
-        <description>Whether to ignore failures on users and group creation</description>
-    </property>
-    <property>
-        <name>smokeuser</name>
-        <value>ambari-qa</value>
-        <property-type>USER</property-type>
-        <description>User executing service checks</description>
-    </property>
-    <property>
-        <name>smokeuser_keytab</name>
-        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
-        <description>Path to smoke test user keytab file</description>
-    </property>
-    <property>
-        <name>user_group</name>
-        <value>hadoop</value>
-        <property-type>GROUP</property-type>
-        <description>Hadoop user group.</description>
-    </property>
-</configuration>
\ No newline at end of file
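
These cluster-env properties are the values the PHD hook scripts below read back through config['configurations']['cluster-env']; security_enabled and user_group, for instance, both surface in after-INSTALL/scripts/params.py.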

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100644
index 16fe7dd..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-# Hook for hosts that carry only client components and nothing else
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_phd_install_directory()
-    setup_config()
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()
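
This follows Ambari's standard hook pattern: subclass Hook from resource_management, implement hook(env), load params into the environment, and let execute() dispatch when the agent invokes the script. Because after-INSTALL fires once a host's packages are installed, it is a safe point to pin phd-select and to rewrite core-site.xml, which is exactly what the two shared_initialization functions do.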

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/params.py
deleted file mode 100644
index e2c47bc..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-
-config = Script.get_config()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  mapreduce_libs_path = "/usr/phd/current/hadoop-mapreduce-client/*"
-  hadoop_libexec_dir = "/usr/phd/current/hadoop-client/libexec"
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-versioned_phd_root = '/usr/phd/current'
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-#java params
-java_home = config['hostLevelParams']['java_home']
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
-  # deprecated rhel jsvc_path
-  jsvc_path = "/usr/libexec/phd-utils"
-else:
-  jsvc_path = "/usr/lib/phd-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 2745606..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/after-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management import *
-
-def setup_phd_install_directory():
-  import params
-  if params.rpm_version:
-    Execute(format('ambari-python-wrap /usr/bin/phd-select set all `ambari-python-wrap /usr/bin/phd-select versions | grep ^{rpm_version}- | tail -1`'),
-            only_if=format('ls -d /usr/phd/{rpm_version}-*')
-    )
-
-def setup_config():
-  import params
-  if params.has_namenode:
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
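
Note: the Execute in setup_phd_install_directory() resolves the newest
installed PHD build for the configured rpm_version and points every
phd-select symlink at it. A minimal Python sketch of that resolution step,
assuming builds live under /usr/phd/<version>-<build> (paths illustrative,
not part of the stack scripts):

    import glob
    import os

    def latest_phd_build(rpm_version, phd_root="/usr/phd"):
        # mirrors `phd-select versions | grep ^<rpm_version>- | tail -1`:
        # list installed builds for this version, take the last in sort order
        builds = sorted(os.path.basename(p)
                        for p in glob.glob(os.path.join(phd_root, rpm_version + "-*")))
        return builds[-1] if builds else None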

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/files/changeToSecureUid.sh
deleted file mode 100644
index 154c1c0..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- newUid=0
- for ((i=1001; i<=2000; i++))
- do
-   # check the uid field (third colon-delimited column) only
-   cut -d: -f3 /etc/passwd | grep -qx $i
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find an available uid between 1001 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0
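
Note: the uid search above can be checked against the passwd database
directly; a standalone sketch of the same idea in Python (not part of the
stack scripts), assuming the same 1001-2000 range:

    import pwd

    def find_available_uid(start=1001, end=2000):
        # collect the uids actually in use, then return the first free one
        used = {entry.pw_uid for entry in pwd.getpwall()}
        for uid in range(start, end + 1):
            if uid not in used:
                return uid
        return None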

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/hook.py
deleted file mode 100644
index 1fd36d6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/hook.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-    
-    setup_jce()
-    setup_users()
-    setup_hadoop_env()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/params.py
deleted file mode 100644
index 91e3008..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/params.py
+++ /dev/null
@@ -1,134 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import collections
-import json
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  mapreduce_libs_path = "/usr/phd/current/hadoop-mapreduce-client/*"
-  hadoop_libexec_dir = "/usr/phd/current/hadoop-client/libexec"
-  hadoop_home = "/usr/phd/current/hadoop-client"
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_home = "/usr/lib/hadoop"
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-versioned_phd_root = '/usr/phd/current'
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
-  # deprecated rhel jsvc_path
-  jsvc_path = "/usr/libexec/phd-utils"
-else:
-  jsvc_path = "/usr/lib/phd-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-
-has_namenode = not len(namenode_host) == 0
-has_nagios = not len(nagios_server_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-nagios_group = config['configurations']['nagios-env']['nagios_group']
-
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = collections.defaultdict(lambda:[user_group])
-user_to_groups_dict[smoke_user] = [proxyuser_group]
-if has_ganglia_server:
-  user_to_groups_dict[gmond_user] = [gmond_user]
-  user_to_groups_dict[gmetad_user] = [gmetad_user]
-if has_tez:
-  user_to_groups_dict[tez_user] = [proxyuser_group]
-if has_oozie_server:
-  user_to_groups_dict[oozie_user] = [proxyuser_group]
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-if has_nagios:
-  user_to_gid_dict[nagios_user] = nagios_group
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
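
Note: the two defaultdicts above give every user the cluster group by default
and override only the special cases. A quick illustration of that behavior
(user and group names hypothetical):

    import collections

    user_group = "hadoop"
    proxyuser_group = "users"

    user_to_groups_dict = collections.defaultdict(lambda: [user_group])
    user_to_groups_dict["ambari-qa"] = [proxyuser_group]

    print(user_to_groups_dict["hdfs"])       # ['hadoop'] -- the default
    print(user_to_groups_dict["ambari-qa"])  # ['users']  -- the override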

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/shared_initialization.py
deleted file mode 100644
index 126b8bb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-ANY/scripts/shared_initialization.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-
-
-def setup_jce():
-  import params
-  
-  if not params.jdk_name:
-    return
-  
-  environment = {
-    "no_proxy": format("{ambari_server_hostname}")
-  }
-  
-  if params.jce_policy_zip is not None:
-    jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
-    download_jce = format("mkdir -p {artifact_dir}; \
-    curl -kf -x \"\" --retry 10 \
-    {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
-    Execute( download_jce,
-             path = ["/bin","/usr/bin/"],
-             not_if =format("test -e {jce_curl_target}"),
-             ignore_failures = True,
-             environment = environment
-    )
-  elif params.security_enabled:
-    # Something weird is happening
-    raise Fail("Security is enabled, but JCE policy zip is not specified.")
-  
-  if params.security_enabled:
-    security_dir = format("{java_home}/jre/lib/security")
-    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
-    Execute(extract_cmd,
-            only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
-            cwd  = security_dir,
-            path = ['/bin/','/usr/bin']
-    )
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-  
-  for group in params.group_list:
-    Group(group,
-        ignore_failures = params.ignore_groupsusers_create
-    )
-    
-  for user in params.user_list:
-    User(user,
-        gid = params.user_to_gid_dict[user],
-        groups = params.user_to_groups_dict[user],
-        ignore_failures = params.ignore_groupsusers_create       
-    )
-           
-  set_uid(params.smoke_user, params.smoke_user_dirs)
-
-  if params.has_hbase_masters:
-    set_uid(params.hbase_user, params.hbase_user_dirs)
-    
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} 2>/dev/null"),
-          not_if = format("test $(id -u {user}) -gt 1000"))
-    
-def setup_hadoop_env():
-  import params
-  if params.has_namenode:
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-    Directory(params.hadoop_conf_empty_dir,
-              recursive=True,
-              owner='root',
-              group='root'
-    )
-    Link(params.hadoop_conf_dir,
-         to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}")
-    )
-    File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
-         owner=tc_owner,
-         content=InlineTemplate(params.hadoop_env_sh_template)
-    )
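
Note: setup_jce() above downloads the JCE policy archive once (guarded by
not_if) and unpacks it over the JRE's policy jars only when security is on.
Its guards reduce to the following decision sketch (hypothetical helper, not
part of the stack scripts):

    def jce_action(jdk_name, jce_policy_zip, security_enabled):
        # mirrors the guards in setup_jce(): skip for user-managed JDKs,
        # fail fast when security is on but no policy zip was provided
        if not jdk_name:
            return "skip"
        if jce_policy_zip:
            return "download+unpack" if security_enabled else "download"
        if security_enabled:
            raise RuntimeError("Security is enabled, but JCE policy zip is not specified.")
        return "skip"

Also worth noting: set_uid() runs the uid change only when the user's current
uid is 1000 or lower (the `test $(id -u {user}) -gt 1000` not_if guard).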

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index 61fba18..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index a8ad47e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import json
-import collections
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-nagios_group = config['configurations']['nagios-env']['nagios_group']
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(nagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = collections.defaultdict(lambda:[user_group])
-user_to_groups_dict[smoke_user] = [proxyuser_group]
-if has_ganglia_server:
-  user_to_groups_dict[gmond_user] = [gmond_user]
-  user_to_groups_dict[gmetad_user] = [gmetad_user]
-if has_tez:
-  user_to_groups_dict[tez_user] = [proxyuser_group]
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-if has_nagios:
-  user_to_gid_dict[nagios_user] = nagios_group
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/repo_initialization.py
deleted file mode 100644
index 39a59cd..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/repo_initialization.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management import *
-from resource_management.core.system import System
-import json
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/PHD/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"PHD-2.0._\",\"repoName\":\"PHD\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/PHD/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  for repo in repo_dicts:
-    if 'baseUrl' not in repo:
-      repo['baseUrl'] = None
-    if 'mirrorsList' not in repo:
-      repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-
-def install_repos():
-  import params
-  template = "repo_suse_rhel.j2" if System.get_instance().os_family in ["suse", "redhat"] else "repo_ubuntu.j2"
-  _alter_repo("create", params.repo_info, template)
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 1b2559b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_java():
-  """
-  Installs the JDK using specific params that come from ambari-server
-  """
-  import params
-
-  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
-  java_dir = os.path.dirname(params.java_home)
-  java_exec = format("{java_home}/bin/java")
-
-  if not params.jdk_name:
-    return
-
-  environment = {
-    "no_proxy": format("{ambari_server_hostname}")
-  }
-
-  Execute(format("mkdir -p {artifact_dir} ; \
-  curl -kf -x \"\" \
-  --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}"),
-          environment = environment)
-
-  if params.jdk_name.endswith(".bin"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
-  elif params.jdk_name.endswith(".gz"):
-    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
-
-  Execute(install_cmd,
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}")
-  )
-
-def install_packages():
-  import params
-  packages = ['unzip', 'curl']
-  if params.rpm_version:
-    packages.append('phd-select')
-  Package(packages)
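
Note: setup_java() above derives the install command from the archive suffix
(.bin self-extracting installers vs .gz tarballs); for any other suffix,
install_cmd is left unbound and the following Execute would fail. A condensed
sketch of that selection (hypothetical helper, paths illustrative):

    def jdk_install_cmd(jdk_name, jdk_curl_target, java_dir):
        # .bin archives are self-extracting installers; .gz are plain tarballs
        if jdk_name.endswith(".bin"):
            return ("mkdir -p {d} ; chmod +x {t}; cd {d} ; "
                    "echo A | {t} -noregister > /dev/null 2>&1").format(d=java_dir, t=jdk_curl_target)
        if jdk_name.endswith(".gz"):
            return "mkdir -p {d} ; cd {d} ; tar -xf {t} > /dev/null 2>&1".format(d=java_dir, t=jdk_curl_target)
        raise ValueError("unsupported JDK archive: " + jdk_name)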

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_suse_rhel.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_suse_rhel.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_suse_rhel.j2
deleted file mode 100644
index d486f89..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_suse_rhel.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-[{{repo_id}}]
-name={{repo_file_name}}
-{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
-
-path=/
-enabled=1
-gpgcheck=0
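
Note: rendered with the sample repo values used above (repo_id "PHD-3.0",
repo_file_name "PHD", a baseUrl and no mirror list), this template produces a
yum/zypper repo file along these lines (values illustrative):

    [PHD-3.0]
    name=PHD
    baseurl=http://example.com/PHD/centos6/3.x/updates/3.0.0.0

    path=/
    enabled=1
    gpgcheck=0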

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_ubuntu.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_ubuntu.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_ubuntu.j2
deleted file mode 100644
index 52d4c9a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-INSTALL/templates/repo_ubuntu.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{package_type}} {{base_url}} {{components}}
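
Note: for the Ubuntu template, _alter_repo() passes components as the repo
name plus "main", so a rendered line looks roughly like this (assuming
package_type "deb" and space-joined components; values illustrative):

    deb http://example.com/PHD/ubuntu12/3.x/updates/3.0.0.0 PHD main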

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-RESTART/scripts/hook.py
deleted file mode 100644
index 14b9d99..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-RESTART/scripts/hook.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index 82dbda1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    export PATH=$PATH:$bin_dir
-    su -s /bin/bash - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
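
Note: the guard above formats the NameNode only when every configured name dir
is empty. The emptiness check reduces to this sketch (standalone Python, not
part of the script; missing dirs are simply skipped here):

    import os

    def all_name_dirs_empty(name_dirs):
        # name_dirs is the comma-separated list passed as trailing arguments
        return all(len(os.listdir(d)) == 0
                   for d in name_dirs.split(",") if os.path.isdir(d))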

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index 7e12962..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/hook.py
deleted file mode 100644
index c90a55c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/params.py
deleted file mode 100644
index 96cd6d8..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,158 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  mapreduce_libs_path = "/usr/phd/current/hadoop-mapreduce-client/*"
-  hadoop_libexec_dir = "/usr/phd/current/hadoop-client/libexec"
-  hadoop_lib_home = "/usr/phd/current/hadoop-client/lib"
-  hadoop_bin = "/usr/phd/current/hadoop-client/sbin"
-  hadoop_home = '/usr/phd/current/hadoop-client'
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_lib_home = "/usr/lib/hadoop/lib"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_home = '/usr'
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#users and groups
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-has_namenode = not len(namenode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(nagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-#hadoop params
-
-if has_namenode:
-  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-
-task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-ambari_server_resources = config['hostLevelParams']['jdk_location']
-oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
-else:
-  rca_enabled = False
-rca_disabled_prefix = "###"
-if rca_enabled:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-
-if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
-  # deprecated rhel jsvc_path
-  jsvc_path = "/usr/libexec/phd-utils"
-else:
-  jsvc_path = "/usr/lib/phd-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
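
Note: in the log4j block above, yarn-log4j content is appended only when
hdfs-log4j content is also present; with yarn-log4j alone, log4j_props stays
None. A flat equivalent that makes the dependency explicit (sketch, plain-dict
assumption):

    def build_log4j_props(configurations):
        hdfs = configurations.get("hdfs-log4j", {}).get("content")
        if hdfs is None:
            return None
        yarn = configurations.get("yarn-log4j", {}).get("content")
        return hdfs + yarn if yarn else hdfs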


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
deleted file mode 100644
index d7c6704..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
+++ /dev/null
@@ -1,1032 +0,0 @@
-14/07/28 17:01:48 INFO balancer.Balancer: Using a threshold of 5.0
-14/07/28 17:01:48 INFO balancer.Balancer: namenodes = [hdfs://evhubudsd1aae.budapest.epam.com:8020]
-14/07/28 17:01:48 INFO balancer.Balancer: p         = Balancer.Parameters[BalancingPolicy.Node, threshold=5.0]
-14/07/28 17:01:49 INFO balancer.Balancer: Block token params received from NN: keyUpdateInterval=600 min(s), tokenLifetime=600 min(s)
-14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
-14/07/28 17:01:49 INFO balancer.Balancer: Balancer will update its block keys every 150 minute(s)
-14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:01:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.887235026238486]]
-14/07/28 17:01:49 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.178140109955496]]
-14/07/28 17:01:49 INFO balancer.Balancer: Need to move 5.74 GB to make the cluster balanced.
-14/07/28 17:01:49 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:01:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:01:57 INFO balancer.Balancer: Moving block 1073950748 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:01:58 INFO balancer.Balancer: Moving block 1073939272 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:06 INFO balancer.Balancer: Moving block 1073863504 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:13 INFO balancer.Balancer: Moving block 1073863516 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:31 INFO balancer.Balancer: Moving block 1073743089 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:03:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.803451571241915]]
-14/07/28 17:03:00 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.262867215362437]]
-14/07/28 17:03:00 INFO balancer.Balancer: Need to move 5.58 GB to make the cluster balanced.
-14/07/28 17:03:00 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:03:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073937443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926003 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073916372 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926002 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073920016 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:05 INFO balancer.Balancer: Moving block 1073937461 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:11 INFO balancer.Balancer: Moving block 1073743437 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:03:20 INFO balancer.Balancer: Moving block 1073743443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:31 INFO balancer.Balancer: Moving block 1073743449 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:03:34 INFO balancer.Balancer: Moving block 1073743440 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:04:07 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.70875539052811]]
-14/07/28 17:04:07 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.35756339607624]]
-14/07/28 17:04:07 INFO balancer.Balancer: Need to move 5.40 GB to make the cluster balanced.
-14/07/28 17:04:07 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:04:07 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:04:07 INFO balancer.Balancer: Moving block 1073743776 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073915941 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930161 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073908316 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:09 INFO balancer.Balancer: Moving block 1073930163 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:51 INFO balancer.Balancer: Moving block 1073947549 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:04 INFO balancer.Balancer: Moving block 1073863141 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:06 INFO balancer.Balancer: Moving block 1073863139 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:05:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.53815392807349]]
-14/07/28 17:05:14 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.528164858530864]]
-14/07/28 17:05:14 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
-14/07/28 17:05:14 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:05:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945158 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918874 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918873 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945162 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918867 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073914540 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918868 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073931861 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:05:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.538117645568114]]
-14/07/28 17:05:50 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.52820114103624]]
-14/07/28 17:05:50 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
-14/07/28 17:05:50 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:05:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073916888 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073925481 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073920767 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073908143 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073911961 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073929306 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:09 INFO balancer.Balancer: Moving block 1073863170 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:06:33 INFO balancer.Balancer: Moving block 1073929250 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:35 INFO balancer.Balancer: Moving block 1073863186 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:06:56 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.407811418798076]]
-14/07/28 17:06:56 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.658507367806276]]
-14/07/28 17:06:56 INFO balancer.Balancer: Need to move 4.81 GB to make the cluster balanced.
-14/07/28 17:06:56 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:06:56 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073919724 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073915864 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073910902 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949844 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926217 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073919721 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926320 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073946575 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949843 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:07:33 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.4068167244793]]
-14/07/28 17:07:33 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.659502062125057]]
-14/07/28 17:07:33 INFO balancer.Balancer: Need to move 4.80 GB to make the cluster balanced.
-14/07/28 17:07:33 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:07:33 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073948620 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073917051 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:07:34 INFO balancer.Balancer: Moving block 1073924651 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:07:40 INFO balancer.Balancer: Moving block 1073742834 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:08:55 INFO balancer.Balancer: Moving block 1073894040 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:08:56 INFO balancer.Balancer: Moving block 1073932476 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:08:59 INFO balancer.Balancer: Moving block 1073742598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:00 INFO balancer.Balancer: Moving block 1073893997 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:09:11 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.144332676814294]]
-14/07/28 17:09:11 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92198610979006]]
-14/07/28 17:09:11 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
-14/07/28 17:09:11 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:09:11 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920127 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743556 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743557 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073929950 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073942945 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920115 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743559 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073947343 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920075 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:09:47 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.14396676101451]]
-14/07/28 17:09:47 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92215625345692]]
-14/07/28 17:09:47 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
-14/07/28 17:09:47 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:09:47 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951772 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951752 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951754 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:52 INFO balancer.Balancer: Moving block 1073951747 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:56 INFO balancer.Balancer: Moving block 1073951765 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:53 INFO balancer.Balancer: Moving block 1073951746 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951745 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951744 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:11:24 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.9413931647133]]
-14/07/28 17:11:24 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:11:24 INFO balancer.Balancer: Need to move 3.89 GB to make the cluster balanced.
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 5.84 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 2.64 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 1.31 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940539 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940537 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927798 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073935420 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927775 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073923954 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073918163 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073949253 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073931581 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073923922 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073931532 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073949248 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073923928 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073927787 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073949252 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073906578 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073914353 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073931557 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073910459 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:12:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.923538618186065]]
-14/07/28 17:12:00 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:12:00 INFO balancer.Balancer: Need to move 3.86 GB to make the cluster balanced.
-14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 2.61 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 7.18 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:12:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073949133 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.7:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945194 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927453 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923118 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905689 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914494 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905688 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923119 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914488 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905681 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905677 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927648 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945235 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945226 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073910053 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927664 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:12:29 INFO balancer.Balancer: Moving block 1073905173 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905177 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905171 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:21 INFO balancer.Balancer: Moving block 1073905175 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:27 INFO balancer.Balancer: Moving block 1073905172 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:13:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.60177342833359]]
-14/07/28 17:13:37 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:13:37 INFO balancer.Balancer: Need to move 3.23 GB to make the cluster balanced.
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.73 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 375.17 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.00 GB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.03 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914692 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927391 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927383 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923582 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905952 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914693 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923467 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918495 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923466 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948829 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945548 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948902 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945546 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905987 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945549 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918570 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945542 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073927370 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073914708 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073948908 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073918565 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073923572 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:46 INFO balancer.Balancer: Moving block 1073936056 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:49 INFO balancer.Balancer: Moving block 1073936057 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:52 INFO balancer.Balancer: Moving block 1073936063 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936045 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936034 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936032 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936033 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:14:41 INFO balancer.Balancer: Moving block 1073936036 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:15:13 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.2458785989085]]
-14/07/28 17:15:13 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:15:13 INFO balancer.Balancer: Need to move 2.53 GB to make the cluster balanced.
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 5.46 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 683.02 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934407 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073926699 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073907624 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930612 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073950332 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934387 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930508 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934414 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073945924 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073922816 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073934411 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073926698 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073922838 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073919113 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073922843 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073907649 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073950223 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:15:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.23893576243181]]
-14/07/28 17:15:49 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:15:49 INFO balancer.Balancer: Need to move 2.52 GB to make the cluster balanced.
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 375.06 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 4.44 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 1.33 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073931740 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073927810 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923141 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073910191 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073905793 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073940704 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949348 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936134 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914594 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949356 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936148 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936164 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936158 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949359 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073918912 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914616 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936151 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923999 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:15:50 INFO balancer.Balancer: Moving block 1073940722 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073927855 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073906497 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073949350 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.224:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073945051 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:16:25 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.236639727566796]]
-14/07/28 17:16:25 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:16:25 INFO balancer.Balancer: Need to move 2.51 GB to make the cluster balanced.
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 2.36 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 463.99 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942946 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947339 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073912361 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926131 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947341 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073929961 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743570 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916254 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743604 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743581 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926130 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073920078 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916287 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073933727 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908503 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743586 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743580 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937539 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942916 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743590 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947329 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743599 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743600 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073895265 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937542 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916258 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916286 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:16:47 INFO balancer.Balancer: Moving block 1073862841 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:17:01 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1720712908457]]
-14/07/28 17:17:01 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:17:01 INFO balancer.Balancer: Need to move 2.39 GB to make the cluster balanced.
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 698.32 MB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915689 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073946573 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915690 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915841 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073919491 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915694 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915842 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073949829 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073895888 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949830 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073922418 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073931011 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949848 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904475 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073946583 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904561 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949813 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073915703 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073926226 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:17:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.17123487505752]]
-14/07/28 17:17:37 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:17:37 INFO balancer.Balancer: Need to move 2.38 GB to make the cluster balanced.
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.23 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 373.37 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.76 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951505 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951406 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951465 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951428 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951479 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951294 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951363 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951445 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951368 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951466 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951325 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.224:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951296 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951333 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951315 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951502 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951383 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951489 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951504 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951313 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951326 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951310 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073951520 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073864141 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:18:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.13074467796647]]
-14/07/28 17:18:14 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:18:14 INFO balancer.Balancer: Need to move 2.31 GB to make the cluster balanced.
-14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 9.08 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 729.65 MB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:18:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935830 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931492 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931497 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073913899 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910416 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928121 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931496 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927763 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935825 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935414 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928117 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928114 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935419 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935418 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910423 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073949598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:18:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1305062958578]]
-14/07/28 17:18:50 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:18:50 INFO balancer.Balancer: Need to move 2.30 GB to make the cluster balanced.
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 895.07 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 7.38 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930642 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950456 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934505 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950457 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934524 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930646 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073915219 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934502 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930640 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073926854 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934510 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934503 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926851 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926857 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073930652 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:18:52 INFO balancer.Balancer: Moving block 1073930651 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:19:02 INFO balancer.Balancer: Moving block 1073934496 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:19:03 INFO balancer.Balancer: Moving block 1073934497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:19:26 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.07965400229293]]
-14/07/28 17:19:26 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:19:26 INFO balancer.Balancer: Need to move 2.21 GB to make the cluster balanced.
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 333.25 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 881.78 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.17 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073931910 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905704 from 10.253.130.9:50010 to 10.253.130.7:5

<TRUNCATED>
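The truncated hunk above is captured HDFS Balancer output: each iteration re-reads the
network topology, reports over- and under-utilized datanodes, states how much data still
has to move, and then logs every successful block move. As a minimal, illustrative Python
sketch (not part of the commit; the sample lines are copied from the log above), the
per-iteration "Need to move" figure can be pulled out like this:

    import re

    NEED_RE = re.compile(r"Need to move ([\d.]+) (GB|MB) to make the cluster balanced")

    def need_to_move_gb(lines):
        # Yield the remaining imbalance, normalized to GB, once per iteration.
        for line in lines:
            m = NEED_RE.search(line)
            if m:
                value, unit = float(m.group(1)), m.group(2)
                yield value if unit == "GB" else value / 1024.0

    sample = [
        "14/07/28 17:16:25 INFO balancer.Balancer: Need to move 2.51 GB to make the cluster balanced.",
        "14/07/28 17:18:14 INFO balancer.Balancer: Need to move 2.31 GB to make the cluster balanced.",
    ]
    print(list(need_to_move_gb(sample)))  # [2.51, 2.31]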

[20/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py
deleted file mode 100644
index c38366f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import functions
-import ganglia_server_service
-
-
-class GangliaServer(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-    
-    functions.turn_off_autostart(params.gmond_service_name) # since the package is installed as well
-    functions.turn_off_autostart("gmetad")
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    ganglia_server_service.server("start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/gmetad.pid")
-    # Recursively check all existing gmetad pid files
-    check_process_status(pid_file)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    ganglia.groups_and_users()
-    ganglia.config()
-
-    generate_daemon("gmetad",
-                    name = "gmetad",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-    change_permission()
-    server_files()
-    File(path.join(params.ganglia_dir, "gmetad.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-def change_permission():
-  import params
-
-  Directory(params.dwoo_path,
-            mode=0755,
-            recursive=True
-  )
-  Execute(format("chown -R {web_user} {dwoo_path}"))
-
-def server_files():
-  import params
-
-  rrd_py_path = params.rrd_py_path
-  Directory(rrd_py_path,
-            recursive=True
-  )
-  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
-  TemplateConfig(rrd_py_file_path,
-                 owner="root",
-                 group="root",
-                 mode=0755
-  )
-  rrd_file_owner = params.gmetad_user
-
-  Directory(params.rrdcached_base_dir,
-            owner=rrd_file_owner,
-            group=rrd_file_owner,
-            mode=0755,
-            recursive=True
-  )
-  
-  if System.get_instance().os_family in ["ubuntu","suse"]:
-    File( params.ganglia_apache_config_file,
-      content = Template("ganglia.conf.j2"),
-      mode = 0644
-    )
-
-
-if __name__ == "__main__":
-  GangliaServer().execute()
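
The GangliaServer class above follows the resource_management Script lifecycle: Ambari
sends one command at a time (install, start, stop, status), Script.execute() dispatches
to the method of the same name, and configure() is invoked from install() and start().
A plain-Python analogue of that dispatch (illustrative only, not the real Script base
class):

    class MiniScript(object):
        def execute(self, command):
            # Dispatch by command name, roughly what Script.execute() does.
            getattr(self, command)()

    class MiniGangliaServer(MiniScript):
        def install(self): print("install packages, disable autostart, configure")
        def start(self):   print("configure, then start gmetad")
        def stop(self):    print("stop gmetad")
        def status(self):  print("check the gmetad pid file")

    MiniGangliaServer().execute("start")  # prints the start branch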

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py
deleted file mode 100644
index b93e3f8..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def server(action=None):# 'start' or 'stop'
-  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  Execute(format(command),
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-  MonitorWebserver("restart")
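
The format() call above is resource_management's variant, which resolves {action} from
the caller's scope. A plain str.format sketch (illustrative) of the command the Execute
resource ends up running for action="start":

    command = ("service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; "
               "/bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1")
    print(command.format(action="start"))
    # service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1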

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/params.py
deleted file mode 100644
index f8373ac..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/params.py
+++ /dev/null
@@ -1,160 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-user_group = config['configurations']['cluster-env']["user_group"]
-ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir", "/etc/ganglia/hdp")
-ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]
-ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
-
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-
-gmond_add_clusters_str = default("/configurations/ganglia-env/additional_clusters", None)
-if gmond_add_clusters_str and gmond_add_clusters_str.isspace():
-  gmond_add_clusters_str = None
-
-gmond_app_strs = [] if gmond_add_clusters_str is None else gmond_add_clusters_str.split(',')
-gmond_apps = []
-
-for x in gmond_app_strs:
-  a,b = x.strip().split(':')
-  gmond_apps.append((a.strip(),b.strip()))
-
-if System.get_instance().os_family == "ubuntu":
-  gmond_service_name = "ganglia-monitor"
-  modules_dir = "/usr/lib/ganglia"
-else:
-  gmond_service_name = "gmond"
-  modules_dir = "/usr/lib64/ganglia"
-
-webserver_group = "apache"
-rrdcached_base_dir = config['configurations']['ganglia-env']["rrdcached_base_dir"]
-rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout", 3600)
-rrdcached_flush_timeout = default("/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
-rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
-rrdcached_write_threads = default("/configurations/ganglia-env/rrdcached_write_threads", 4)
-
-ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
-
-hostname = config["hostname"]
-namenode_host = set(default("/clusterHostInfo/namenode_host", []))
-jtnode_host = set(default("/clusterHostInfo/jtnode_host", []))
-rm_host = set(default("/clusterHostInfo/rm_host", []))
-hs_host = set(default("/clusterHostInfo/hs_host", []))
-hbase_master_hosts = set(default("/clusterHostInfo/hbase_master_hosts", []))
-# datanodes are marked as slave_hosts
-slave_hosts = set(default("/clusterHostInfo/slave_hosts", []))
-tt_hosts = set(default("/clusterHostInfo/mapred_tt_hosts", []))
-nm_hosts = set(default("/clusterHostInfo/nm_hosts", []))
-hbase_rs_hosts = set(default("/clusterHostInfo/hbase_rs_hosts", []))
-flume_hosts = set(default("/clusterHostInfo/flume_hosts", []))
-jn_hosts = set(default("/clusterHostInfo/journalnode_hosts", []))
-nimbus_server_hosts = set(default("/clusterHostInfo/nimbus_hosts", []))
-supervisor_server_hosts = set(default("/clusterHostInfo/supervisor_hosts", []))
-
-pure_slave = not hostname in (namenode_host | jtnode_host | rm_host | hs_host | \
-                              hbase_master_hosts | slave_hosts | tt_hosts | hbase_rs_hosts | \
-                              flume_hosts | nm_hosts | jn_hosts | nimbus_server_hosts | \
-                              supervisor_server_hosts)
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-is_tasktracker = hostname in tt_hosts
-is_nodemanager = hostname in nm_hosts
-is_hbase_rs = hostname in hbase_rs_hosts
-is_flume = hostname in flume_hosts
-is_ganglia_server_host = (hostname == ganglia_server_host)
-is_jn_host = hostname in jn_hosts
-is_nimbus_host = hostname in nimbus_server_hosts
-is_supervisor_host = hostname in supervisor_server_hosts
-
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_tasktracker = not len(tt_hosts) == 0
-has_nodemanager = not len(nm_hosts) == 0
-has_hbase_rs = not len(hbase_rs_hosts) == 0
-has_flume = not len(flume_hosts) == 0
-has_journalnode = not len(jn_hosts) == 0
-has_nimbus_server = not len(nimbus_server_hosts) == 0
-has_supervisor_server = not len(supervisor_server_hosts) == 0
-
-ganglia_cluster_names = {
-  "jn_hosts": [("HDPJournalNode", 8654)],
-  "flume_hosts": [("HDPFlumeServer", 8655)],
-  "hbase_rs_hosts": [("HDPHBaseRegionServer", 8656)],
-  "nm_hosts": [("HDPNodeManager", 8657)],
-  "mapred_tt_hosts": [("HDPTaskTracker", 8658)],
-  "slave_hosts": [("HDPDataNode", 8659)],
-  "namenode_host": [("HDPNameNode", 8661)],
-  "jtnode_host": [("HDPJobTracker", 8662)],
-  "hbase_master_hosts": [("HDPHBaseMaster", 8663)],
-  "rm_host": [("HDPResourceManager", 8664)],
-  "hs_host": [("HDPHistoryServer", 8666)],
-  "nimbus_hosts": [("HDPNimbus", 8649)],
-  "supervisor_hosts": [("HDPSupervisor", 8650)],
-  "ReservedPort1": [("ReservedPort1", 8667)],
-  "ReservedPort2": [("ReservedPort2", 8668)],
-  "ReservedPort3": [("ReservedPort3", 8669)]
-}
-
-ganglia_clusters = [("HDPSlaves", 8660)]
-
-for key in ganglia_cluster_names:
-  property_name = format("/clusterHostInfo/{key}")
-  hosts = set(default(property_name, []))
-  if not len(hosts) == 0:
-    for x in ganglia_cluster_names[key]:
-      ganglia_clusters.append(x)
-
-if len(gmond_apps) > 0:
-  for gmond_app in gmond_apps:
-    ganglia_clusters.append(gmond_app)
-
-ganglia_apache_config_file = "/etc/apache2/conf.d/ganglia.conf"
-ganglia_web_path="/var/www/html/ganglia"
-if System.get_instance().os_family == "suse":
-  rrd_py_path = '/srv/www/cgi-bin'
-  dwoo_path = '/var/lib/ganglia-web/dwoo'
-  web_user = "wwwrun"
-  # for upgrade purposes as path to ganglia was changed
-  if not os.path.exists(ganglia_web_path):
-    ganglia_web_path='/srv/www/htdocs/ganglia'
-
-elif  System.get_instance().os_family == "redhat":
-  rrd_py_path = '/var/www/cgi-bin'
-  dwoo_path = '/var/lib/ganglia/dwoo'
-  web_user = "apache"
-elif  System.get_instance().os_family == "ubuntu":
-  rrd_py_path = '/usr/lib/cgi-bin'
-  ganglia_web_path = '/usr/share/ganglia-webfrontend'
-  dwoo_path = '/var/lib/ganglia/dwoo'
-  web_user = "www-data"
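
Two parsing details in the file above are easy to miss: additional_clusters is a
comma-separated list of name:port pairs, and ganglia_clusters is seeded with
("HDPSlaves", 8660) before one entry is appended per host group that is actually
present. A short sketch of the first step, using a hypothetical config value:

    gmond_add_clusters_str = "AppOne:8670, AppTwo:8671"  # hypothetical additional_clusters
    gmond_apps = []
    for x in gmond_add_clusters_str.split(','):
        a, b = x.strip().split(':')
        gmond_apps.append((a.strip(), b.strip()))
    print(gmond_apps)  # [('AppOne', '8670'), ('AppTwo', '8671')]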

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/status_params.py
deleted file mode 100644
index 0c69ca9..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['ganglia-env']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/ganglia.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/ganglia.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/ganglia.conf.j2
deleted file mode 100644
index a08fb31..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/ganglia.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-Alias /ganglia "{{ganglia_web_path}}"
-
-<Directory "{{ganglia_web_path}}">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-#  AuthName "Ganglia Access"
-#  AuthType Basic
-#  AuthUserFile /etc/ganglia/htpasswd.users
-#  Require valid-user
-</Directory>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaClusters.conf.j2
deleted file mode 100644
index ffb4e84..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaClusters.conf.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-{% for x in ganglia_clusters %}
-    {{ x[0] }}       	  {{ganglia_server_host}}  {{ x[1] }}
-{% endfor %}
\ No newline at end of file
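
The loop above writes one "ClusterName  GmondMasterHost  GmondPort" row per entry in
ganglia_clusters. A minimal rendering sketch with Jinja2 (the engine behind these .j2
templates), using sample clusters and a hypothetical server host:

    from jinja2 import Template

    tmpl = Template(
        "{% for x in ganglia_clusters %}"
        "    {{ x[0] }}\t{{ ganglia_server_host }}  {{ x[1] }}\n"
        "{% endfor %}")
    print(tmpl.render(ganglia_clusters=[("HDPSlaves", 8660), ("HDPNameNode", 8661)],
                      ganglia_server_host="ganglia.example.com"))  # hypothetical host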

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaEnv.sh.j2
deleted file mode 100644
index 0b68623..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaEnv.sh.j2
+++ /dev/null
@@ -1,46 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-RRD_ROOTDIR={{rrdcached_base_dir}}
-GMETAD_USER={{gmetad_user}};
-GMOND_USER={{gmond_user}};
-WEBSERVER_GROUP={{webserver_group}};
-MODULES_DIR={{modules_dir}}
-GANGLIA_WEB_PATH={{ganglia_web_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaLib.sh.j2
deleted file mode 100644
index 6c24c7f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/gangliaLib.sh.j2
+++ /dev/null
@@ -1,85 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR={{ganglia_conf_dir}};
-GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
-RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
-RRDCACHED_WRITE_THREADS={{rrdcached_write_threads}}
-RRDCACHED_TIMEOUT={{rrdcached_timeout}}
-RRDCACHED_FLUSH_TIMEOUT={{rrdcached_flush_timeout}}
-RRDCACHED_DELAY={{rrdcached_delay}}
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}
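
getGangliaClusterInfo() above filters gangliaClusters.conf with awk: comment lines are
skipped, and either the single named cluster row or every non-empty row is printed. A
Python analogue (illustrative only; the host name is hypothetical):

    def ganglia_cluster_info(conf_text, cluster_name=None):
        # Mirror the awk filters: drop comments/blanks, optionally match field 1.
        for line in conf_text.splitlines():
            fields = line.split()
            if not fields or fields[0].startswith('#'):
                continue
            if cluster_name is None or fields[0] == cluster_name:
                yield line

    conf = "# ClusterName  GmondMasterHost  GmondPort\nHDPSlaves  gs.example.com  8660\n"
    print(list(ganglia_cluster_info(conf, "HDPSlaves")))  # ['HDPSlaves  gs.example.com  8660']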

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/rrd.py.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/rrd.py.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/rrd.py.j2
deleted file mode 100644
index 65d70e2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/templates/rrd.py.j2
+++ /dev/null
@@ -1,361 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-# NOTE: This script is executed by Python 2.4 on Centos 5. 
-# Make sure your changes are compatible.
-
-import cgi
-import glob
-import os
-import re
-import rrdtool
-import sys
-import time
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-'''
-  Loads rrd file info
-'''
-def loadRRDData(file, cf, start, end, resolution):
-  args = [file, cf, "--daemon", "unix:{{ganglia_runtime_dir}}/rrdcached.limited.sock"]
-
-  if start is not None:
-    args.extend(["-s", start])
-  else:
-    args.extend(["-s", "now-10m"])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  return rrdtool.fetch(args)
-
-'''
-  Collects metrics across several matching filenames.
-'''
-def collectStatMetrics(clusterName, hostName, metricName, files, cf, start, end, resolution):
-  if clusterName[0] is not '/':
-    clusterName.insert(0, '/')
-
-  metricParts = metricName.split('.')
-
-  # already know there's at least one
-  metricStat = metricParts[-1]
-  metricName = '.'.join(metricParts[:-1])
-
-  isRate = False
-  if len(metricParts) > 1 and metricParts[-2] == '_rate':
-    isRate = True
-    metricName = '.'.join(metricParts[:-2])
-
-  pattern = re.compile(metricName + '\.rrd$')
-  matchedFiles = filter(pattern.match, files)
-
-  parentPath = os.path.join(*clusterName)
-
-  actualFiles = []
-  for matchedFile in matchedFiles:
-    if hostName != "__SummaryInfo__":
-      osFiles = glob.glob(os.path.join(parentPath, hostName, matchedFile))
-    else:
-      osFiles = glob.glob(os.path.join(parentPath, '*', matchedFile))
-
-    for f in osFiles:
-      if -1 == f.find("__SummaryInfo__"):
-        actualFiles.append(f)
-
-  if len(actualFiles) == 0:
-    return
-
-  '''
-  [
-    {
-      "step_value": update each iteration
-      "count": increase by 1 each iteration
-      "sum": increase by value each iteration
-      "avg": update each iteration as sum/count
-      "min": update each iteration if step_value < old min OR min is missing (first time)
-      "max": update each iteration if step_value > old max OR max is missing (first time)
-    }
-  ]
-  '''
-
-  timestamp = None
-  stepsize = None
-  concreteMetricName = None
-  vals = None # values across all files
-
-  for file in actualFiles:
-    rrdMetric = loadRRDData(file, cf, start, end, resolution)
-    
-    if timestamp is None and stepsize is None and concreteMetricName is None:
-      timestamp = rrdMetric[0][0]
-      stepsize = rrdMetric[0][2]
-      
-      if not isRate:
-        suffix = metricStat
-      else:
-        suffix = '_rate.' + metricStat
-      
-      concreteMetricName = file.split(os.sep).pop().replace('rrd', suffix)
-
-    metricValues = rrdMetric[2]
-
-    if vals is None:
-      vals = [None] * len(metricValues)
-
-    i = 0
-    for tuple in metricValues:
-      if vals[i] is None:
-        vals[i] = {}
-        vals[i]['count'] = 0
-        vals[i]['_sum'] = 0.0
-        vals[i]['_avg'] = 0.0
-        vals[i]['_min'] = 999999999999.99
-        vals[i]['_max'] = 0.0
-
-      rawValue = tuple[0]
-      vals[i]['step_value'] = rawValue
-      if rawValue is None:
-        i += 1
-        continue
-
-      if isRate:
-        if 0 == i:
-          rawValue = 0.0
-        elif vals[i-1]['step_value'] is None:
-          rawValue = 0.0
-        else:
-          rawValue = (rawValue - vals[i-1]['step_value']) / stepsize
-      
-      vals[i]['count'] += 1 
-      vals[i]['_sum'] += rawValue
-
-      vals[i]['_avg'] = vals[i]['_sum']/vals[i]['count']
-
-      if rawValue < vals[i]['_min']:
-        vals[i]['_min'] = rawValue
-
-      if rawValue > vals[i]['_max']:
-        vals[i]['_max'] = rawValue
-      
-      i += 1
-
-  sys.stdout.write("sum\n")
-  sys.stdout.write(clusterName[len(clusterName)-1] + "\n")
-  sys.stdout.write(hostName + "\n")
-  sys.stdout.write(concreteMetricName + "\n")
-  sys.stdout.write(str(timestamp) + "\n")
-  sys.stdout.write(str(stepsize) + "\n")
-
-  for val in vals:
-    if val['step_value'] is None:
-      sys.stdout.write("[~n]")
-    else:
-      sys.stdout.write(str(val[metricStat]))
-    sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-
-  return
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end,
-                resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
- 
-  rrdMetric = loadRRDData(file, cf, start, end, resolution)
-
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
-    value = None
-    idx = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx -= 1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-
-def stripList(l):
-  return ([x.strip() for x in l])
-
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "{{rrdcached_base_dir}}"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-
-def _walk(*args, **kwargs):
-  for root, dirs, files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root, dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(os.path.join(rrdPath,cluster)):
-    pathParts = path.split("/")
-    #Process only paths that contain files. If no host parameter was passed, process all host folders and the summary info.
-    #If a host parameter was passed, process only that host's folder.
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          #Exact name of metric
-          printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                      os.path.join(path, file), cf, start, end, resolution,
-                      pointInTime)
-        else:
-          need_stats = False
-          parts = metric.split(".")
-          if len(parts) > 0 and parts[-1] in ['_min', '_max', '_avg', '_sum']:
-              need_stats = True
-
-          if need_stats and not pointInTime:
-            collectStatMetrics(pathParts[:-1], pathParts[-1], metric, files, cf, start, end, resolution)
-          else:
-            #Regex as metric name
-            metricRegex = metric + '\.rrd$'
-            p = re.compile(metricRegex)
-            matchedFiles = filter(p.match, files)
-            for matchedFile in matchedFiles:
-              printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
-                         os.path.join(path, matchedFile), cf, start, end,
-                         resolution, pointInTime)
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()
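
For context, the deleted script above is a CGI endpoint: it reads the cluster
("c"), host ("h"), metric name or regex ("m"), time range ("s"/"e"),
resolution ("r"), consolidation function ("cf") and point-in-time flag ("pt")
from the query string or POST body, then streams plain text terminated by
[~EOF]. A minimal sketch of how a client might have queried it (Python 2, to
match the script; the collector hostname is hypothetical):

import urllib
import urllib2

# Parameter names mirror the query-string handling in the script above.
params = urllib.urlencode({
    "c": "MyCluster",            # cluster ("c")
    "h": "host1.example.com",    # host ("h"); omit to cover all hosts
    "m": "load_one",             # exact metric name or regex ("m")
    "cf": "AVERAGE",             # consolidation function ("cf")
    "s": "now-10m",              # start ("s"); the script's own default
})
url = "http://ganglia-collector.example.com/cgi-bin/rrd.py?" + params
print urllib2.urlopen(url).read()  # plain-text payload ending in [~EOF]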

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-env.xml
deleted file mode 100644
index f3bbc57..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-env.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>Pid Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_max</name>
-    <value>512</value>
-    <description>
-Sets the upper bound on HBase RegionServers' young generation size.
-This value is used in case the young generation size (-Xmn) calculated based on the max heapsize (hbase_regionserver_heapsize)
-and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
-    </description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_ratio</name>
-    <value>0.2</value>
-    <description>Percentage of max heap size (-Xmx) which is used for young generation heap (-Xmn).</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <property-type>USER</property-type>
-    <description>HBase User Name.</description>
-  </property>
-
-  <!-- hbase-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hbase-env.sh file</description>
-    <value>
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage its own instance of ZooKeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}
-    </value>
-  </property>
-
-</configuration>
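
The {{...}} markers in the content template above are Jinja2 placeholders that
the Ambari agent fills in from stack parameters when it writes hbase-env.sh.
As a rough illustration only (Ambari renders these through its
resource_management library; the values below are made up), plain jinja2
performs the same substitution for simple variables:

from jinja2 import Template

snippet = "export JAVA_HOME={{java64_home}}\nexport HBASE_LOG_DIR={{log_dir}}"
print(Template(snippet).render(java64_home="/usr/jdk64/jdk1.7.0_67",
                               log_dir="/var/log/hbase"))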

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-log4j.xml
deleted file mode 100644
index 57b3845..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-log4j.xml
+++ /dev/null
@@ -1,143 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Define some default values that can be overridden by system properties
-hbase.root.logger=INFO,console
-hbase.security.logger=INFO,console
-hbase.log.dir=.
-hbase.log.file=hbase.log
-
-# Define the root logger to the system property "hbase.root.logger".
-log4j.rootLogger=${hbase.root.logger}
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-# Rolling File Appender properties
-hbase.log.maxfilesize=256MB
-hbase.log.maxbackupindex=20
-
-# Rolling File Appender
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-#
-# Security audit appender
-#
-hbase.security.log.file=SecurityAuth.audit
-hbase.security.log.maxfilesize=256MB
-hbase.security.log.maxbackupindex=20
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
-log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.category.SecurityLogger=${hbase.security.logger}
-log4j.additivity.SecurityLogger=false
-#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
-
-#
-# Null Appender
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-# Custom Logging levels
-
-log4j.logger.org.apache.zookeeper=INFO
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.hbase=DEBUG
-# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
-#log4j.logger.org.apache.hadoop.dfs=DEBUG
-# Set this class to log INFO only, otherwise it's OTT
-# Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
-
-
-# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
-#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
-
-# Uncomment the below if you want to remove logging of client region caching
-# and scan of .META. messages
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
-# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
-
-    </value>
-  </property>
-
-</configuration>
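
Note that hbase.root.logger above defaults to INFO,console, while the same
template also defines a size-based rolling appender (RFA); pointing the value
at it instead (for example, INFO,RFA via a -Dhbase.root.logger system property
or by editing the default in this template) would send root logging to the
rolling file rather than the console.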

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index 2f12801..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 84900d1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,331 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-    <description>The port the HBase Master should bind to.</description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.local.dir</name>
-    <value>${hbase.tmp.dir}/local</value>
-    <description>Directory on the local filesystem to be used as a local storage
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstore during spikes in update traffic.  Without an
-    upper-bound, memstore fills such that when it flushes the
-    resultant flush files take a long time to compact or split, or
-    worse, we OOME (OutOfMemoryError).
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since such entries cannot be split, this helps avoid a
-    region becoming unsplittable because its data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flush.retries.number</name>
-    <value>120</value>
-    <description>
-    The number of times the region flush operation will be retried.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
-      (no authentication), and 'kerberos'.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>Disables version verification.</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>Path to domain socket.</description>
-  </property>
-
-</configuration>
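
As a worked example of how the memstore settings above interact (a sketch
only; the 1024 MB heap is the hbase_regionserver_heapsize value from
hbase-env.xml earlier in this diff):

heap_mb = 1024.0                    # hbase_regionserver_heapsize
upper_mb = 0.4 * heap_mb            # global.memstore.upperLimit: 409.6 MB total memstore
lower_mb = 0.38 * heap_mb           # global.memstore.lowerLimit: ~389 MB flush-down target
flush_mb = 134217728 / 1024.0 ** 2  # hregion.memstore.flush.size: 128 MB per region
block_mb = 2 * flush_mb             # block.multiplier: a region blocks writes at 256 MB
print("upper=%.1f lower=%.1f flush=%.0f block-at=%.0f (MB)"
      % (upper_mb, lower_mb, flush_mb, block_mb))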

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metainfo.xml
deleted file mode 100644
index 9e36a09..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,139 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <displayName>HBase</displayName>
-      <comment>Non-relational distributed database and centralized service for configuration management &amp;
-        synchronization
-      </comment>
-      <version>0.98.5.phd.3.0.0.0</version>
-      <components>
-        <component>
-          <name>HBASE_MASTER</name>
-          <displayName>HBase Master</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HBASE/HBASE_MASTER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <displayName>RegionServer</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HBASE_CLIENT</name>
-          <displayName>HBase Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hbase-site.xml</fileName>
-              <dictionaryName>hbase-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hbase-env.sh</fileName>
-              <dictionaryName>hbase-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hbase-log4j</dictionaryName>
-            </configFile>            
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hbase</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-        <config-type>hbase-env</config-type>
-        <config-type>hbase-log4j</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>
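
A small sketch of reading a service metainfo.xml like the one above with the
standard library (the local file path here is hypothetical):

import xml.etree.ElementTree as ET

root = ET.parse("metainfo.xml").getroot()
for comp in root.iter("component"):
    # e.g. "HBASE_MASTER (MASTER, cardinality 1+)"
    print("%s (%s, cardinality %s)" % (comp.findtext("name"),
                                       comp.findtext("category"),
                                       comp.findtext("cardinality")))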


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 87940a7..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,538 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>hive.heapsize</name>
-    <value>1024</value>
-    <description>Hive Java heap size</description>
-  </property>
-
-<!-- Begin changes metastore database to postgres -->
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:postgresql://localhost/hive</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>org.postgresql.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-<!-- End changes metastore database to postgres -->
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property require-input="true">
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value>false</value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it's best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>Pre-event listener classes to be loaded on the metastore side to run code
-      whenever databases, tables, and partitions are created, altered, or dropped.
-      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
-      if metastore-side authorization is desired.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
-      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
-      process runs as.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.impersonation</name>
-    <description>Enable user impersonation for HiveServer2</description>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication</name>
-    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
-    <value>NOSASL</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable HDFS filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable local filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sortmergebucketmapjoin</name>
-    <value>true</value>
-    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
-      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
-      this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>false</value>
-    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-    of buckets, a sort-merge join can be performed by setting this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
-      the criteria for sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) join to a map-join SMB.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>4</value>
-    <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-MR job.
-      The optimization will be disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g. a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values for each key in the map-joined table should be
-      cached in memory.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>true</value>
-    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-    Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-  <property>
-    <name>hive.execution.engine</name>
-    <value>mr</value>
-    <description>Whether to use MR or Tez</description>
-  </property>
-
-  <!-- 
-  <property>
-    <name>hive.exec.post.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>Comma-separated list of post-execution hooks to be invoked for each statement.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.pre.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>Comma-separated list of pre-execution hooks to be invoked for each statement.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.failure.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>Comma-separated list of on-failure hooks to be invoked for each statement.</description>
-  </property>
-  -->
-
-  <property>
-    <name>hive.vectorized.groupby.maxentries</name>
-    <value>100000</value>
-    <description>Max number of entries in the vector group by aggregation hashtables.
-      Exceeding this will trigger a flush regardless of memory pressure.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.groupby.checkinterval</name>
-    <value>1024</value>
-    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.groupby.flush.percent</name>
-    <value>0.1</value>
-    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
-  </property>
-
-  <property>
-    <name>hive.stats.autogather</name>
-    <value>true</value>
-    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.container.size</name>
-    <value>682</value>
-    <description>By default, Tez uses the java options from map tasks. Use this property to override that value. The assigned value must match the value specified for mapreduce.map.child.java.opts.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.input.format</name>
-    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
-    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.java.opts</name>
-    <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC</value>
-    <description>Java command line options for Tez. Must be assigned the same value as mapreduce.map.child.java.opts.</description>
-  </property>
-
-  <property>
-    <name>hive.compute.query.using.stats</name>
-    <value>true</value>
-    <description>
-      When set to true, Hive will answer a few queries, such as count(1), purely using stats
-      stored in the metastore. For basic stats collection, set hive.stats.autogather to true.
-      For more advanced stats collection, you need to run ANALYZE TABLE queries.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.orc.splits.include.file.footer</name>
-    <value>false</value>
-    <description>
-      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
-      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.limit.optimize.enable</name>
-    <value>true</value>
-    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
-  </property>
-
-  <property>
-    <name>hive.limit.pushdown.memory.usage</name>
-    <value>0.04</value>
-    <description>The maximum memory, as a fraction, to be used for the hash in the ReduceSink (RS) operator for top-K selection.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.tez.default.queues</name>
-    <value>default</value>
-    <description>A comma-separated list of queues configured for the cluster.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.tez.sessions.per.default.queue</name>
-    <value>1</value>
-    <description>The number of sessions for each queue named in hive.server2.tez.default.queues.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.tez.initialize.default.sessions</name>
-    <value>false</value>
-    <description>Enables a user to use HiveServer2 without enabling Tez for HiveServer2. Users may potentially want to run queries with Tez without a pool of sessions.</description>
-  </property>
-
-  <property>
-    <name>hive.txn.manager</name>
-    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
-    <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
-  </property>
-
-  <property>
-    <name>hive.txn.timeout</name>
-    <value>300</value>
-    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
-  </property>
-
-  <property>
-    <name>hive.txn.max.open.batch</name>
-    <value>1000</value>
-    <description>Maximum number of transactions that can be fetched in one call to open_txns(). Increasing this will decrease the number of delta files created when streaming data into Hive. But it will also increase the number of open transactions at any given time, possibly impacting read performance.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.initiator.on</name>
-    <value>false</value>
-    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.worker.threads</name>
-    <value>0</value>
-    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.worker.timeout</name>
-    <value>86400L</value>
-    <description>Time, in seconds, before a given compaction in working state is declared a failure and returned to the initiated state.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.check.interval</name>
-    <value>300L</value>
-    <description>Time in seconds between checks to see if any partitions need to be compacted. This should be kept high because each check for compaction requires many calls against the NameNode.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.delta.num.threshold</name>
-    <value>10</value>
-    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.delta.pct.threshold</name>
-    <value>0.1f</value>
-    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.abortedtxn.threshold</name>
-    <value>1000</value>
-    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
-  </property>
-
-  <property>
-    <name>datanucleus.cache.level2.type</name>
-    <value>none</value>
-    <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.thrift.port</name>
-    <value>10000</value>
-    <description>
-      TCP port number to listen on, default 10000.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.spnego.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>
-      The SPNEGO service principal would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.spnego.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <description>
-      This keytab would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.support.dynamic.service.discovery</name>
-    <value>false</value>
-    <description>Whether HiveServer2 supports dynamic service discovery for its
-      clients. To support this, each instance of HiveServer2 currently uses
-      ZooKeeper to register itself, when it is brought up. JDBC/ODBC clients
-      should use the ZooKeeper ensemble (hive.zookeeper.quorum) in their
-      connection string.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.zookeeper.namespace</name>
-    <value>hiveserver2</value>
-    <description>The parent node in ZooKeeper used by HiveServer2 when
-      supporting dynamic service discovery.
-    </description>
-  </property>
-
-</configuration>
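
A side note on the hive.server2.support.dynamic.service.discovery and
hive.server2.zookeeper.namespace properties above: when discovery is enabled,
clients resolve HiveServer2 through ZooKeeper rather than a fixed host. A
minimal Python sketch of the connection string this implies (hostnames are
placeholders, and the URL layout assumes the standard Hive JDBC driver
syntax):

    # Builds a JDBC URL that asks the Hive driver to locate HiveServer2
    # via the ZooKeeper ensemble instead of a hard-coded host:port.
    def hs2_discovery_url(zk_quorum, namespace="hiveserver2"):
        return ("jdbc:hive2://%s/;serviceDiscoveryMode=zooKeeper;"
                "zooKeeperNamespace=%s" % (zk_quorum, namespace))

    print(hs2_discovery_url("zk1.example.com:2181,zk2.example.com:2181"))

The namespace argument defaults to the hive.server2.zookeeper.namespace value
("hiveserver2") shown above.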

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-env.xml
deleted file mode 100644
index 14a473f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-env.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- webhcat-env.sh -->
-  <property>
-    <name>content</name>
-    <description>webhcat-env.sh content</description>
-    <value>
-# The file containing the running pid
-PID_FILE={{webhcat_pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME={{hadoop_home}}
-    </value>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-site.xml
deleted file mode 100644
index 951dcda..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.home</name>
-    <value>hive.tar.gz/hive</value>
-    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat.home</name>
-    <value>hive.tar.gz/hive/hcatalog</value>
-    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9933,hive.metastore.sasl.enabled=false</value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The HDFS path to the Hadoop streaming jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout, in milliseconds, for the Templeton API.</description>
-  </property>
-
-</configuration>
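
One note on the templeton.hive.properties value above: it is a single
comma-separated list of key=value pairs. A small illustrative Python sketch of
how such a value splits out (a naive split that ignores escaped commas; the
function name is hypothetical, not part of the stack):

    def parse_templeton_properties(raw):
        # Turn "k1=v1,k2=v2,..." into a dict of Hive settings.
        props = {}
        for pair in raw.split(","):
            key, _, value = pair.partition("=")
            props[key.strip()] = value.strip()
        return props

    parse_templeton_properties(
        "hive.metastore.local=false,"
        "hive.metastore.uris=thrift://localhost:9933,"
        "hive.metastore.sasl.enabled=false")
    # -> {'hive.metastore.local': 'false',
    #     'hive.metastore.uris': 'thrift://localhost:9933',
    #     'hive.metastore.sasl.enabled': 'false'}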

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
deleted file mode 100644
index b0415b1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
+++ /dev/null
@@ -1,777 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- ------------------------------------------------------
--- Server version	5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` varchar(4000) DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_GRANT_ID`),
-  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `DB_PRIVS_N49` (`DB_ID`),
-  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `GLOBAL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
-  `USER_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`USER_GRANT_ID`),
-  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `IDXS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `IDXS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DEFERRED_REBUILD` bit(1) NOT NULL,
-  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`),
-  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
-  KEY `IDXS_N51` (`SD_ID`),
-  KEY `IDXS_N50` (`INDEX_TBL_ID`),
-  KEY `IDXS_N49` (`ORIG_TBL_ID`),
-  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `INDEX_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
-  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
-  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `NUCLEUS_TABLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
-  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`CLASS_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITIONS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITIONS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`),
-  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
-  KEY `PARTITIONS_N49` (`TBL_ID`),
-  KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_EVENTS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
-  `PART_NAME_ID` bigint(20) NOT NULL,
-  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `EVENT_TIME` bigint(20) NOT NULL,
-  `EVENT_TYPE` int(11) NOT NULL,
-  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_NAME_ID`),
-  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEYS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
-  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
-  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEY_VALS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
-  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
-  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
-  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
-  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
-  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
-  `PART_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_GRANT_ID`),
-  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `PART_PRIVS_N49` (`PART_ID`),
-  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLES` (
-  `ROLE_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`ROLE_ID`),
-  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLE_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
-  `ROLE_GRANT_ID` bigint(20) NOT NULL,
-  `ADD_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`ROLE_GRANT_ID`),
-  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `ROLE_MAP_N49` (`ROLE_ID`),
-  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SDS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `CD_ID` bigint(20) DEFAULT NULL,
-  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `IS_COMPRESSED` bit(1) NOT NULL,
-  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `NUM_BUCKETS` int(11) NOT NULL,
-  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SERDE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`),
-  KEY `SDS_N49` (`SERDE_ID`),
-  KEY `SDS_N50` (`CD_ID`),
-  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
-  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SD_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
-  KEY `SD_PARAMS_N49` (`SD_ID`),
-  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SEQUENCE_TABLE`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
-  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NEXT_VAL` bigint(20) NOT NULL,
-  PRIMARY KEY (`SEQUENCE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDES` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
-  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
-  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_NAMES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
-  `SD_ID` bigint(20) NOT NULL,
-  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
-  `SD_ID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
-  `SD_ID_OID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
-  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
-  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
-  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SORT_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SORT_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ORDER` int(11) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SORT_COLS_N49` (`SD_ID`),
-  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TABLE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
-  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
-  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBLS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `RETENTION` int(11) NOT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `VIEW_EXPANDED_TEXT` mediumtext,
-  `VIEW_ORIGINAL_TEXT` mediumtext,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`),
-  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
-  KEY `TBLS_N50` (`SD_ID`),
-  KEY `TBLS_N49` (`DB_ID`),
-  KEY `TBLS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
-  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
-  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
-  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
-  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
-  `TBL_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_GRANT_ID`),
-  KEY `TBL_PRIVS_N49` (`TBL_ID`),
-  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TAB_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TBL_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `PART_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PART_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `TYPES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPES` (
-  `TYPES_ID` bigint(20) NOT NULL,
-  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TYPES_ID`),
-  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TYPE_FIELDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
-  `TYPE_NAME` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
-  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
-  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
--- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
-(
-    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
-    `MASTER_KEY` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`KEY_ID`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
--- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
-(
-    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
-    `TOKEN` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`TOKEN_IDENT`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
---
--- Table structure for VERSION
---
-CREATE TABLE IF NOT EXISTS `VERSION` (
-  `VER_ID` BIGINT NOT NULL,
-  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
-  `VERSION_COMMENT` VARCHAR(255),
-  PRIMARY KEY (`VER_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
-/*!40101 SET character_set_client = @saved_cs_client */;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-08-23  0:56:31
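
For orientation in the metastore schema above: DBS holds databases, TBLS holds
tables (foreign key DB_ID), and SDS holds the storage descriptors each table
points at through SD_ID. A hypothetical read-only query following those keys,
sketched with mysql-connector-python (the package, host, and credentials are
assumptions for illustration only):

    import mysql.connector

    conn = mysql.connector.connect(host="localhost", user="hive",
                                   password="hive", database="hive")
    cur = conn.cursor()
    # Every table with its database and storage location, via the
    # TBLS -> DBS and TBLS -> SDS foreign keys defined above.
    cur.execute("""
        SELECT d.NAME, t.TBL_NAME, s.LOCATION
        FROM TBLS t
        JOIN DBS d ON t.DB_ID = d.DB_ID
        LEFT JOIN SDS s ON t.SD_ID = s.SD_ID
        ORDER BY d.NAME, t.TBL_NAME
    """)
    for db_name, tbl_name, location in cur.fetchall():
        print(db_name, tbl_name, location)
    conn.close()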


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2
deleted file mode 100644
index 3b34db8..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Define some default values that can be overridden by system properties.
-#
-# For testing, it may also be convenient to specify
-# -Dflume.root.logger=DEBUG,console when launching flume.
-
-#flume.root.logger=DEBUG,console
-flume.root.logger=INFO,LOGFILE
-flume.log.dir={{flume_log_dir}}
-flume.log.file=flume-{{agent_name}}.log
-
-log4j.logger.org.apache.flume.lifecycle = INFO
-log4j.logger.org.jboss = WARN
-log4j.logger.org.mortbay = INFO
-log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
-log4j.logger.org.apache.hadoop = INFO
-
-# Define the root logger to the system property "flume.root.logger".
-log4j.rootLogger=${flume.root.logger}
-
-
-# Stock log4j rolling file appender
-# Default log rotation configuration
-log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.LOGFILE.MaxFileSize=100MB
-log4j.appender.LOGFILE.MaxBackupIndex=10
-log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
-log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
-
-
-# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
-# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
-# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
-# Add "DAILY" to flume.root.logger above if you want to use this
-log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
-log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
-log4j.appender.DAILY.rollingPolicy.ActiveFileName=${flume.log.dir}/${flume.log.file}
-log4j.appender.DAILY.rollingPolicy.FileNamePattern=${flume.log.dir}/${flume.log.file}.%d{yyyy-MM-dd}
-log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
-log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
-
-
-# console
-# Add "console" to flume.root.logger above if you want to use this
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml
deleted file mode 100644
index 3328acf..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <property-type>USER GROUP</property-type>
-    <description>User that runs the gmetad daemon.</description>
-  </property>
-  <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <property-type>USER GROUP</property-type>
-    <description>User that runs the gmond daemon.</description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-  <property>
-    <name>rrdcached_timeout</name>
-    <value>3600</value>
-    <description>(-w) Data is written to disk every timeout seconds. If this option is not specified the default interval of 300 seconds will be used.</description>
-  </property>
-  <property>
-    <name>rrdcached_flush_timeout</name>
-    <value>7200</value>
-      <description>(-f) Every timeout seconds the entire cache is searched for old values which are written to disk. This only concerns files to which updates have stopped, so setting this to a high value, such as 3600 seconds, is acceptable in most cases. This timeout defaults to 3600 seconds.</description>
-  </property>
-  <property>
-    <name>rrdcached_delay</name>
-    <value>1800</value>
-    <description>(-z) If specified, rrdcached will delay writing of each RRD for a random number of seconds in the range [0,delay). This will avoid too many writes being queued simultaneously. This value should be no greater than the value specified in -w. By default, there is no delay.</description>
-  </property>
-  <property>
-    <name>rrdcached_write_threads</name>
-    <value>4</value>
-    <description>(-t) Specifies the number of threads used for writing RRD files. The default is 4. Increasing this number will allow rrdcached to have more simultaneous I/O requests into the kernel. This may allow the kernel to re-order disk writes, resulting in better disk throughput.</description>
-  </property>
-  <property>
-    <name>additional_clusters</name>
-    <value> </value>
-    <description>Add additional desired Ganglia metrics clusters in the form "name1:port1,name2:port2". Ensure that the names and ports are unique across all clusters and that the ports are available on the Ganglia server host. Ambari has reserved ports 8667-8669 within its own pool.</description>
-  </property>
-
-</configuration>

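The rrdcached_* properties above map one-to-one onto rrdcached command-line
flags. A minimal sketch of the resulting invocation with the default values
(the RRDCACHED_* names match the variables consumed by startRrdcached.sh
further down; the standalone command line itself is illustrative only):

    RRDCACHED_BASE_DIR=/var/lib/ganglia/rrds    # rrdcached_base_dir
    RRDCACHED_TIMEOUT=3600                      # rrdcached_timeout       (-w)
    RRDCACHED_FLUSH_TIMEOUT=7200                # rrdcached_flush_timeout (-f)
    RRDCACHED_DELAY=1800                        # rrdcached_delay         (-z)
    RRDCACHED_WRITE_THREADS=4                   # rrdcached_write_threads (-t)

    /usr/bin/rrdcached -b "${RRDCACHED_BASE_DIR}" -B \
        -w "${RRDCACHED_TIMEOUT}" -f "${RRDCACHED_FLUSH_TIMEOUT}" \
        -z "${RRDCACHED_DELAY}" -t "${RRDCACHED_WRITE_THREADS}"
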
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 4e96ade..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,127 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>GANGLIA</name>
-      <displayName>Ganglia</displayName>
-      <comment>Ganglia Metrics Collection system (&lt;a href=&quot;http://oss.oetiker.ch/rrdtool/&quot; target=&quot;_blank&quot;&gt;RRDTool&lt;/a&gt; will be installed too)</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>GANGLIA_SERVER</name>
-          <displayName>Ganglia Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/ganglia_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>GANGLIA_MONITOR</name>
-          <displayName>Ganglia Monitor</displayName>
-          <category>SLAVE</category>
-          <cardinality>ALL</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/ganglia_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>python-rrdtool-1.4.5</name>
-            </package>
-            <package>
-              <name>libganglia-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-gmetad-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-web-3.5.7-99.noarch</name>
-            </package>
-            <package>
-              <name>ganglia-gmond-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-gmond-modules-python-3.5.0-99</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>python-rrdtool</name>
-            </package>
-            <package>
-              <name>gmetad</name>
-            </package>
-            <package>
-              <name>ganglia-webfrontend</name>
-            </package>
-            <package>
-              <name>ganglia-monitor-python</name>
-            </package>
-            <package>
-              <name>rrdcached</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>apache2</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6</osFamily>
-          <packages>
-            <package>
-              <name>httpd</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>ganglia-env</config-type>
-      </configuration-dependencies>
-      <monitoringService>true</monitoringService>
-    </service>
-  </services>
-</metainfo>

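Note that osFamily values repeat across osSpecific blocks (redhat5,redhat6
appears both with and without suse11), so the effective package set for a
host is the union of every block whose family matches. A quick way to
inspect that from a shell, assuming xmllint is available (the XPath is
illustrative, not part of the stack tooling):

    # Union of package names from every block mentioning redhat6:
    xmllint --xpath \
      '//osSpecific[contains(osFamily,"redhat6")]//package/name/text()' \
      metainfo.xml
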
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmetad.sh
deleted file mode 100644
index f45e371..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi

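The checker exits 0 only when a live gmetad PID is found; ./checkRrdcached.sh
is run for its console output only, since its exit status is never tested.
That makes the script easy to compose with the start script, e.g. in a
hypothetical watchdog:

    if ./checkGmetad.sh
    then
        echo "gmetad healthy";
    else
        echo "gmetad down, restarting..." >&2;
        ./startGmetad.sh;
    fi
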
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmond.sh
deleted file mode 100644
index fbf524a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

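The dispatch pattern at the bottom (no argument = act on every configured
cluster) recurs in startGmond.sh further down. Typical invocations, with a
hypothetical cluster name:

    ./checkGmond.sh              # check gmond for every configured cluster
    ./checkGmond.sh HDPSlaves    # check only the "HDPSlaves" cluster
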
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkRrdcached.sh
deleted file mode 100644
index 1e0c2e2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetad.init
deleted file mode 100644
index 20b388e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetad.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

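The chkconfig header registers the service for runlevels 2-5 with start
priority 70 and stop priority 40, so on RHEL-family hosts it is driven like
any SysV init script (standard chkconfig/service usage, not commands from
this tree):

    chkconfig --add hdp-gmetad    # reads the "# chkconfig: 2345 70 40" header
    service hdp-gmetad start
    service hdp-gmetad status     # delegates to checkGmetad.sh
    service hdp-gmetad stop
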
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetadLib.sh
deleted file mode 100644
index 6a24bed..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmetadLib.sh
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep the daemon in the foreground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port address2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is assumed.
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is
-# greater than 4 * TMAX (20sec by default). Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution, 1 day at 6 minute resolution
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-rrd_rootdir "${RRD_ROOTDIR}"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a
-# case-sensitive manner.
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}

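getGmetadRunningPid confirms liveness by asking ps for the logged PID under
a sentinel header (pid=MYPID) and then filtering the header back out, so it
echoes the PID only if the process still exists. A shorter probe with
similar effect is kill -0, sketched below (not what the library uses, and
kill -0 can also fail on permissions alone, which the ps form avoids):

    function isPidAlive()
    {
        # Succeeds iff a process with this PID exists and is signalable.
        kill -0 "${1}" 2>/dev/null;
    }
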
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmond.init
deleted file mode 100644
index afb7026..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmond.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmondLib.sh
deleted file mode 100644
index e7ea83f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/gmondLib.sh
+++ /dev/null
@@ -1,539 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- * Also we use this port for Nagios monitoring
- */
-tcp_accept_channel {
-  bind = 0.0.0.0
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "${MODULES_DIR}/modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "${MODULES_DIR}/moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "${MODULES_DIR}/modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "${MODULES_DIR}/modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "${MODULES_DIR}/modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "${MODULES_DIR}/modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "${MODULES_DIR}/modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host's total memory
-   every 180 secs.
-   This information doesn't change between reboots and is only collected
-   once. It is needed for heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In honesty, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}

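Assuming GANGLIA_CONF_DIR and GANGLIA_RUNTIME_DIR carry the stack defaults
from ganglia-env.xml (/etc/ganglia/hdp and /var/run/ganglia/hdp), the
per-cluster helpers resolve like this for a hypothetical cluster HDPSlaves:

    getGmondCoreConfFileName HDPSlaves    # /etc/ganglia/hdp/HDPSlaves/gmond.core.conf
    getGmondMasterConfFileName HDPSlaves  # /etc/ganglia/hdp/HDPSlaves/conf.d/gmond.master.conf
    getGmondPidFileName HDPSlaves         # /var/run/ganglia/hdp/HDPSlaves/gmond.pid
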
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/rrdcachedLib.sh
deleted file mode 100644
index a070fca..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}

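The two sockets encode two trust levels: gmetad gets the all-access socket,
while the limited one is restricted to FLUSH, STATS and HELP (see the -P
flag in startRrdcached.sh further down) and is the one wired into the web
frontend. Any rrdtool client can exercise the limited socket, e.g. (the
.rrd path is illustrative):

    rrdtool flushcached \
        --daemon unix:/var/run/ganglia/hdp/rrdcached.limited.sock \
        HDPSlaves/host1/load_one.rrd
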
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/setupGanglia.sh
deleted file mode 100644
index e700eaa..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi

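Putting the option handling together, the three main invocation shapes are
(cluster name, owner and group here are illustrative; -t always takes
precedence over -c):

    ./setupGanglia.sh -t -o nobody -g nobody          # generate gmetad.conf
    ./setupGanglia.sh -c HDPSlaves -o root -g hadoop  # slave gmond config
    ./setupGanglia.sh -c HDPSlaves -m                 # master gmond config
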
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmetad.sh
deleted file mode 100644
index b271e06..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmetad.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -f "${GMETAD_PID_FILE}" ] && [ -z "${gmetadRunningPid}" ]
-    then
-      rm -f ${GMETAD_PID_FILE}; rm -f /var/lock/subsys/hdp-gmetad
-    fi
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi

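The six-iteration poll-and-verify loop above is repeated almost verbatim in
startGmond.sh and startRrdcached.sh below. A shared helper along these
lines (hypothetical; no such function exists in this tree) would remove the
triplication:

    function waitForRunningPid()
    {
        # ${1} is a command that echoes a PID once the daemon is up.
        runningPid="";
        for i in `seq 0 5`; do
            runningPid=`${1}`;
            if [ -n "${runningPid}" ]
            then
                break;
            fi
            sleep 1;
        done
        echo "${runningPid}";
    }

    # e.g.: gmetadRunningPid=`waitForRunningPid getGmetadRunningPid`;
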
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmond.sh
deleted file mode 100644
index cdf4fe0..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startGmond.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-    gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -f "${gmondPidFileName}" ] && [ -z "${gmondRunningPid}" ]
-    then
-      rm -f ${gmondPidFileName}
-    fi
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startRrdcached.sh
deleted file mode 100644
index dc47f39..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/startRrdcached.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    su -s /bin/bash - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B -t ${RRDCACHED_WRITE_THREADS} \
-             -w ${RRDCACHED_TIMEOUT} -f ${RRDCACHED_FLUSH_TIMEOUT} -z ${RRDCACHED_DELAY} -F"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
-    # this, but it sometimes doesn't take effect due to a lack of permissions,
-    # so perform the operation explicitly to be super-sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-
-    #Configure Ganglia Web to work with RRDCached
-    GANGLIA_WEB_CONFIG_FILE=${GANGLIA_WEB_PATH}/conf_default.php
-
-    if [ -f $GANGLIA_WEB_CONFIG_FILE ]
-    then
-      sed -i "s@\$conf\['rrdcached_socket'] =.*@\$conf\['rrdcached_socket'] = \"unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET}\";@" $GANGLIA_WEB_CONFIG_FILE
-      sed -i "s@\$conf\['rrds'] =.*@\$conf\['rrds'] = \"${RRDCACHED_BASE_DIR}\";@" $GANGLIA_WEB_CONFIG_FILE
-      sed -i "s@\$conf\['gmetad_root'] =.*@\$conf\['gmetad_root'] = \"${RRDCACHED_BASE_DIR}\";@" $GANGLIA_WEB_CONFIG_FILE
-    else
-      echo "${GANGLIA_WEB_CONFIG_FILE} can't be found";
-      exit 1;
-    fi
-
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi
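
The three sed edits above pin Ganglia Web to the rrdcached socket and RRD
base directory. A rough Python equivalent, for illustration only
(set_php_conf is a hypothetical helper):

import re

def set_php_conf(path, key, value):
    # Rewrite a $conf['key'] = ...; assignment in conf_default.php.
    with open(path) as f:
        text = f.read()
    pattern = re.compile(r"\$conf\['%s'\] =.*" % re.escape(key))
    with open(path, 'w') as f:
        f.write(pattern.sub("$conf['%s'] = \"%s\";" % (key, value), text))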

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmetad.sh
deleted file mode 100644
index 86731d2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi
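
The ordering here matters: gmetad is the process writing through rrdcached,
so rrdcached is only stopped once gmetad is confirmed dead. Schematically
(both helper names are hypothetical):

import os
import signal

def stop_gmetad_then_rrdcached(get_gmetad_pid, stop_rrdcached):
    pid = get_gmetad_pid()
    if pid:
        os.kill(int(pid), signal.SIGKILL)
    # Stop rrdcached only once gmetad is really gone.
    if not get_gmetad_pid():
        stop_rrdcached()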

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmond.sh
deleted file mode 100644
index 8824d2a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopRrdcached.sh
deleted file mode 100644
index bffc9e5..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 
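
The TERM-plus-sleep combination gives rrdcached time to drain its write
buffers and keeps a back-to-back startRrdcached.sh from matching the dying
instance. The same shape in Python (pid handling is schematic):

import os
import signal
import time

def stop_rrdcached(pid):
    # TERM (not KILL) so rrdcached flushes its buffers; then wait long
    # enough that an immediate restart won't see the dying instance.
    os.kill(pid, signal.SIGTERM)
    time.sleep(5)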

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/teardownGanglia.sh
deleted file mode 100644
index 8740c27..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/functions.py
deleted file mode 100644
index b02e688..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/functions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from resource_management import *
-
-
-def turn_off_autostart(service):
-  if System.get_instance().os_family == "ubuntu":
-    Execute(format("update-rc.d {service} disable"),
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    )
-    Execute(format("service {service} stop"), ignore_failures=True)
-    Execute(format("echo 'manual' > /etc/init/{service}.override")) # disable the upstart job
-  else:
-    Execute(format("chkconfig {service} off"),
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    )
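
turn_off_autostart() branches on OS family: Debian-style systems need both
update-rc.d and an Upstart override, while RHEL-style systems just use
chkconfig. A standalone sketch without the resource_management wrappers
(os_family is passed in here rather than detected):

import subprocess

def turn_off_autostart(service, os_family):
    if os_family == "ubuntu":
        subprocess.call(["update-rc.d", service, "disable"])
        subprocess.call(["service", service, "stop"])
        # Writing 'manual' to an override file disables the Upstart job.
        with open("/etc/init/%s.override" % service, "w") as f:
            f.write("manual\n")
    else:
        subprocess.call(["chkconfig", service, "off"])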

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia.py
deleted file mode 100644
index 69fde27..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import os
-
-
-def groups_and_users():
-  import params
-
-def config():
-  import params
-
-  shell_cmds_dir = params.ganglia_shell_cmds_dir
-  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
-                 'gmondLib.sh', 'rrdcachedLib.sh',
-                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
-                 'startRrdcached.sh', 'stopGmetad.sh',
-                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
-  Directory(shell_cmds_dir,
-            owner="root",
-            group="root",
-            recursive=True
-  )
-  init_file("gmetad")
-  init_file("gmond")
-  for sh_file in shell_files:
-    shell_file(sh_file)
-  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
-    ganglia_TemplateConfig(conf_file)
-
-
-def init_file(name):
-  import params
-
-  File("/etc/init.d/hdp-" + name,
-       content=StaticFile(name + ".init"),
-       mode=0755
-  )
-
-
-def shell_file(name):
-  import params
-
-  File(params.ganglia_shell_cmds_dir + os.sep + name,
-       content=StaticFile(name),
-       mode=0755
-  )
-
-
-def ganglia_TemplateConfig(name, mode=0755, tag=None):
-  import params
-
-  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
-                 owner="root",
-                 group="root",
-                 template_tag=tag,
-                 mode=mode
-  )
-
-
-def generate_daemon(ganglia_service,
-                    name=None,
-                    role=None,
-                    owner=None,
-                    group=None):
-  import params
-
-  cmd = ""
-  if ganglia_service == "gmond":
-    if role == "server":
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
-    else:
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
-  elif ganglia_service == "gmetad":
-    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
-  else:
-    raise Fail("Unexpected ganglia service")
-  Execute(format(cmd),
-          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
-                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
-  )
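
generate_daemon() just assembles a setupGanglia.sh invocation: -c names the
gmond cluster, -m marks the master (server) flavor, and -t selects gmetad
setup. The flag selection in isolation (build_setup_cmd is a hypothetical
name; the defaults are illustrative):

def build_setup_cmd(shell_dir, service, name=None, role=None,
                    owner="root", group="hadoop"):
    if service == "gmond":
        master_flag = "-m " if role == "server" else ""
        return "%s/setupGanglia.sh -c %s %s-o %s -g %s" % (
            shell_dir, name, master_flag, owner, group)
    elif service == "gmetad":
        return "%s/setupGanglia.sh -t -o %s -g %s" % (shell_dir, owner, group)
    raise ValueError("Unexpected ganglia service")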

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor.py
deleted file mode 100644
index ede1a0b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ /dev/null
@@ -1,236 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import functions
-import ganglia_monitor_service
-
-
-class GangliaMonitor(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-    
-    functions.turn_off_autostart(params.gmond_service_name)
-    functions.turn_off_autostart("gmetad") # since the package is installed as well
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    ganglia_monitor_service.monitor("start")
-
-  def stop(self, env):
-    ganglia_monitor_service.monitor("stop")
-
-
-  def status(self, env):
-    import status_params
-    pid_file_name = 'gmond.pid'
-    pid_file_count = 0
-    pid_dir = status_params.pid_dir
-    # Recursively check all existing gmond pid files
-    for cur_dir, subdirs, files in os.walk(pid_dir):
-      for file_name in files:
-        if file_name == pid_file_name:
-          pid_file = os.path.join(cur_dir, file_name)
-          check_process_status(pid_file)
-          pid_file_count += 1
-    if pid_file_count == 0: # If no pid file is present at all
-      raise ComponentIsNotRunning()
-
-
-  def configure(self, env):
-    import params
-
-    ganglia.groups_and_users()
-
-    Directory(params.ganglia_conf_dir,
-              owner="root",
-              group=params.user_group,
-              recursive=True
-    )
-
-    ganglia.config()
-    
-    self.generate_slave_configs()
-
-    Directory(path.join(params.ganglia_dir, "conf.d"),
-              owner="root",
-              group=params.user_group
-    )
-
-    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "gmond.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-    if params.is_ganglia_server_host:
-      self.generate_master_configs()
-
-      if len(params.gmond_apps) != 0:
-        self.generate_app_configs()
-        pass
-      pass
-
-
-  def generate_app_configs(self):
-    import params
-
-    for gmond_app in params.gmond_apps:
-      generate_daemon("gmond",
-                      name=gmond_app[0],
-                      role="server",
-                      owner="root",
-                      group=params.user_group)
-      generate_daemon("gmond",
-                      name = gmond_app[0],
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-    pass
-
-  def generate_slave_configs(self):
-    import params
-
-    generate_daemon("gmond",
-                    name = "HDPSlaves",
-                    role = "monitor",
-                    owner = "root",
-                    group = params.user_group)
-
-
-  def generate_master_configs(self):
-    import params
-     
-    if params.has_namenodes:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_jobtracker:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_masters:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_resourcemanager:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_nodemanager:
-      generate_daemon("gmond",
-                      name = "HDPNodeManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_historyserver:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_slaves:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_nimbus_server:
-      generate_daemon("gmond",
-                      name = "HDPNimbus",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_supervisor_server:
-      generate_daemon("gmond",
-                      name = "HDPSupervisor",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_journalnode:
-      generate_daemon("gmond",
-                      name = "HDPJournalNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    generate_daemon("gmond",
-                    name = "HDPSlaves",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-
-if __name__ == "__main__":
-  GangliaMonitor().execute()
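
status() above treats the monitor as healthy if every gmond.pid found under
the runtime directory passes check_process_status, and raises
ComponentIsNotRunning only when no pid file exists at all. Stripped of the
Ambari wrappers, the walk looks like this (alive is a stand-in for
check_process_status):

import os

def any_gmond_running(pid_dir, alive):
    found = False
    for cur_dir, _dirs, files in os.walk(pid_dir):
        if "gmond.pid" in files:
            alive(os.path.join(cur_dir, "gmond.pid"))
            found = True
    return found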

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor_service.py
deleted file mode 100644
index cf7a4b1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_monitor_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def monitor(action=None):# 'start' or 'stop'
-  Execute(
-    format(
-      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
-    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )


[18/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb
deleted file mode 100644
index 5bcb5b6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb
+++ /dev/null
@@ -1,164 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Add, remove, or list servers in draining mode via ZooKeeper
-
-require 'optparse'
-include Java
-
-import org.apache.hadoop.hbase.HBaseConfiguration
-import org.apache.hadoop.hbase.client.HBaseAdmin
-import org.apache.hadoop.hbase.zookeeper.ZKUtil
-import org.apache.commons.logging.Log
-import org.apache.commons.logging.LogFactory
-
-# Name of this script
-NAME = "draining_servers"
-
-# Do command-line parsing
-options = {}
-optparse = OptionParser.new do |opts|
-  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
-  opts.separator 'Add, remove, or list servers in draining mode. Can accept either a hostname to drain all region servers ' +
-                 'on that host, a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
-  opts.on('-h', '--help', 'Display usage information') do
-    puts opts
-    exit
-  end
-  options[:debug] = false
-  opts.on('-d', '--debug', 'Display extra debug logging') do
-    options[:debug] = true
-  end
-end
-optparse.parse!
-
-# Return array of servernames where servername is hostname+port+startcode
-# comma-delimited
-def getServers(admin)
-  serverInfos = admin.getClusterStatus().getServerInfo()
-  servers = []
-  for server in serverInfos
-    servers << server.getServerName()
-  end
-  return servers
-end
-
-def getServerNames(hostOrServers, config)
-  ret = []
-  
-  for hostOrServer in hostOrServers
-    # check whether it is already serverName. No need to connect to cluster
-    parts = hostOrServer.split(',')
-    if parts.size() == 3
-      ret << hostOrServer
-    else 
-      admin = HBaseAdmin.new(config) if not admin
-      servers = getServers(admin)
-
-      hostOrServer = hostOrServer.gsub(/:/, ",")
-      for server in servers 
-        ret << server if server.start_with?(hostOrServer)
-      end
-    end
-  end
-  
-  admin.close() if admin
-  return ret
-end
-
-def addServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
-  servers = getServerNames(hostOrServers, config)
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-  
-  begin
-    for server in servers
-      node = ZKUtil.joinZNode(parentZnode, server)
-      ZKUtil.createAndFailSilent(zkw, node)
-    end
-  ensure
-    zkw.close()
-  end
-end
-
-def removeServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
-  servers = getServerNames(hostOrServers, config)
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-  
-  begin
-    for server in servers
-      node = ZKUtil.joinZNode(parentZnode, server)
-      ZKUtil.deleteNodeFailSilent(zkw, node)
-    end
-  ensure
-    zkw.close()
-  end
-end
-
-# list servers in draining mode
-def listServers(options)
-  config = HBaseConfiguration.create()
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-
-  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
-  servers.each {|server| puts server}
-end
-
-hostOrServers = ARGV[1..ARGV.size()]
-
-# Create a logger and disable the annoying DEBUG-level client logging
-def configureLogging(options)
-  apacheLogger = LogFactory.getLog(NAME)
-  # Configure log4j to not spew so much
-  unless (options[:debug]) 
-    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
-    logger.setLevel(org.apache.log4j.Level::WARN)
-    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
-    logger.setLevel(org.apache.log4j.Level::WARN)
-  end
-  return apacheLogger
-end
-
-# Create a logger and save it in a Ruby global
-$LOG = configureLogging(options)
-case ARGV[0]
-  when 'add'
-    if ARGV.length < 2
-      puts optparse
-      exit 1
-    end
-    addServers(options, hostOrServers)
-  when 'remove'
-    if ARGV.length < 2
-      puts optparse
-      exit 1
-    end
-    removeServers(options, hostOrServers)
-  when 'list'
-    listServers(options)
-  else
-    puts optparse
-    exit 3
-end
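
getServerNames() accepts three spellings: a full host,port,startCode
triplet is used verbatim, while a bare hostname or host:port is turned into
a comma-delimited prefix and matched against the live server list. The
matching rule alone, as a Python sketch (the Ruby script obtains
live_servers from the cluster status):

def match_server_names(host_or_servers, live_servers):
    matched = []
    for item in host_or_servers:
        if len(item.split(",")) == 3:
            matched.append(item)  # already a full servername
        else:
            prefix = item.replace(":", ",")
            matched.extend(s for s in live_servers if s.startswith(prefix))
    return matched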

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 5c320c0..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-hbase_cmd=$3
-echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/functions.py
deleted file mode 100644
index e6e7fb9..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/functions.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import math
-import datetime
-
-from resource_management.core.shell import checked_call
-
-def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
-  """
-  @param heapsize_str: str (e.g '1000m')
-  @param xmn_percent: float (e.g 0.2)
-  @param xmn_max: integer (e.g 512)
-  """
-  heapsize = int(re.search('\d+',heapsize_str).group(0))
-  heapsize_unit = re.search('\D+',heapsize_str).group(0)
-  xmn_val = int(math.floor(heapsize*xmn_percent))
-  xmn_val -= xmn_val % 8
-  
-  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
-  return str(result_xmn_val) + heapsize_unit
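
Concretely: for a 1024m regionserver heap with a 0.2 ratio and a 512 cap,
floor(1024 * 0.2) = 204, and rounding down to a multiple of 8 gives 200,
which is under the cap:

calc_xmn_from_xms('1024m', 0.2, 512)   # -> '200m'
calc_xmn_from_xms('8192m', 0.2, 512)   # -> '512m' (1632 exceeds the cap)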

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase.py
deleted file mode 100644
index 2829c7b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-def hbase(name=None # 'master' or 'regionserver' or 'client'
-              ):
-  import params
-
-  Directory( params.hbase_conf_dir,
-      owner = params.hbase_user,
-      group = params.user_group,
-      recursive = True
-  )
-
-  Directory (params.tmp_dir,
-             owner = params.hbase_user,
-             recursive = True
-  )
-
-  Directory (os.path.join(params.local_dir, "jars"),
-             owner = params.hbase_user,
-             group = params.user_group,
-             mode=0775,
-             recursive = True
-  )
-
-  XmlConfig( "hbase-site.xml",
-            conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hbase-site'],
-            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  XmlConfig( "hdfs-site.xml",
-            conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if 'hbase-policy' in params.config['configurations']:
-    XmlConfig( "hbase-policy.xml",
-            conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hbase-policy'],
-            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
-            owner = params.hbase_user,
-            group = params.user_group
-    )
-  # Manually overriding ownership of file installed by hadoop package
-  else: 
-    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-
-  File(format("{hbase_conf_dir}/hbase-env.sh"),
-       owner = params.hbase_user,
-       content=InlineTemplate(params.hbase_env_sh_template)
-  )     
-       
-  hbase_TemplateConfig( params.metric_prop_file_name,
-    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
-  )
-
-  hbase_TemplateConfig( 'regionservers')
-
-  if params.security_enabled:
-    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
-  
-  if name != "client":
-    Directory( params.pid_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-  
-    Directory (params.log_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-
-  if (params.log4j_props != None):
-    File(format("{params.hbase_conf_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hbase_user,
-         content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
-    File(format("{params.hbase_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hbase_user
-    )
-  if name in ["master","regionserver"]:
-    params.HdfsDirectory(params.hbase_hdfs_root_dir,
-                         action="create_delayed",
-                         owner=params.hbase_user
-    )
-    params.HdfsDirectory(params.hbase_staging_dir,
-                         action="create_delayed",
-                         owner=params.hbase_user,
-                         mode=0711
-    )
-    params.HdfsDirectory(None, action="create")
-
-def hbase_TemplateConfig(name, 
-                         tag=None
-                         ):
-  import params
-
-  TemplateConfig( format("{hbase_conf_dir}/{name}"),
-      owner = params.hbase_user,
-      template_tag = tag
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_client.py
deleted file mode 100644
index 043ad11..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-
-         
-class HbaseClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    
-    hbase(name='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_decommission.py
deleted file mode 100644
index a623927..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_decommission.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hbase_decommission(env):
-  import params
-
-  env.set_params(params)
-  kinit_cmd = params.kinit_cmd
-
-  File(params.region_drainer,
-       content=StaticFile("draining_servers.rb"),
-       mode=0755
-  )
-  
-  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
-    hosts = params.hbase_excluded_hosts.split(",")
-  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
-    hosts = params.hbase_included_hosts.split(",")
-
-  if params.hbase_drain_only:
-    for host in hosts:
-      if host:
-        regiondrainer_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
-        Execute(regiondrainer_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-        pass
-    pass
-
-  else:
-    for host in hosts:
-      if host:
-        regiondrainer_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
-        regionmover_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
-
-        Execute(regiondrainer_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-
-        Execute(regionmover_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-      pass
-    pass
-  pass
-  
-
-  pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_master.py
deleted file mode 100644
index a26254d..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-from hbase_decommission import hbase_decommission
-
-         
-class HbaseMaster(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(name='master')
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'master',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'master',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
-    check_process_status(pid_file)
-
-  def decommission(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_decommission(env)
-
-
-if __name__ == "__main__":
-  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_regionserver.py
deleted file mode 100644
index 8d66dcc..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-         
-class HbaseRegionServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(name='regionserver')
-      
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'regionserver',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'regionserver',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
-    check_process_status(pid_file)
-    
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-    
-
-if __name__ == "__main__":
-  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_service.py
deleted file mode 100644
index 723d4e2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/hbase_service.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
-    import params
-  
-    role = name
-    cmd = format("{daemon_script} --config {hbase_conf_dir}")
-    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
-    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    
-    if action == 'start':
-      daemon_cmd = format("{cmd} start {role}")
-      
-      Execute ( daemon_cmd,
-        not_if = no_op_test,
-        user = params.hbase_user
-      )
-    elif action == 'stop':
-      daemon_cmd = format("{cmd} stop {role}")
-
-      Execute ( daemon_cmd,
-        user = params.hbase_user,
-        # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
-        timeout = 30,
-        on_timeout = format("{no_op_test} && kill -9 `cat {pid_file}`")
-      )
-      
-      Execute (format("rm -f {pid_file}"))
\ No newline at end of file
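
The stop path is deliberately defensive: a graceful stop with a 30-second
timeout, a kill -9 on the pid from the pid file if it hangs (the
regionserver can wedge while the NameNode is in safemode), and finally
removal of the pid file. The same shape without resource_management (names
are illustrative):

import subprocess
import time

def stop_with_fallback(daemon_cmd, pid_file, timeout=30):
    proc = subprocess.Popen(daemon_cmd, shell=True)
    deadline = time.time() + timeout
    while proc.poll() is None and time.time() < deadline:
        time.sleep(1)
    if proc.poll() is None:
        proc.kill()  # the graceful stop itself hung
        subprocess.call("kill -9 `cat %s`" % pid_file, shell=True)
    subprocess.call(["rm", "-f", pid_file])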

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/params.py
deleted file mode 100644
index 4e4d5de..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/params.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from functions import calc_xmn_from_xms
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-exec_tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  hadoop_bin_dir = format("/usr/phd/current/hadoop-client/bin")
-  daemon_script = format('/usr/phd/current/hbase-client/bin/hbase-daemon.sh')
-  region_mover = format('/usr/phd/current/hbase-client/bin/region_mover.rb')
-  region_drainer = format('/usr/phd/current/hbase-client/bin/draining_servers.rb')
-  hbase_cmd = format('/usr/phd/current/hbase-client/bin/hbase')
-else:
-  hadoop_bin_dir = "/usr/bin"
-  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-  hbase_cmd = "/usr/lib/hbase/bin/hbase"
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hbase_conf_dir = "/etc/hbase/conf"
-hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = default("/commandParams/mark_draining_only",False)
-hbase_included_hosts = config['commandParams']['included_hosts']
-
-hbase_user = status_params.hbase_user
-hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-# this is "hadoop-metrics.properties" for 1.x stacks
-metric_prop_file_name = "hadoop-metrics2-hbase.properties"
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-log_dir = config['configurations']['hbase-env']['hbase_log_dir']
-master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
-
-regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
-regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
-regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
-regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
-
-pid_dir = status_params.pid_dir
-tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-# TODO UPGRADE default, update site during upgrade
-_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
-local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
-
-client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
-master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
-regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-# if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
-if 'slave_hosts' in config['clusterHostInfo']:
-  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') # if hbase_rs_hosts is not given, assume region servers run on the same nodes as the slaves
-else:
-  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
-
-smoke_test_user = config['configurations']['cluster-env']['smokeuser']
-smokeuser_permissions = "RWXCA"
-service_check_data = functions.get_unique_id_and_date()
-user_group = config['configurations']['cluster-env']["user_group"]
-
-if security_enabled:
-  _hostname_lowercase = config['hostname'].lower()
-  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-
-master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
-regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-if security_enabled:
-  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
-else:
-  kinit_cmd = ""
-
-#log4j.properties
-if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
-  log4j_props = config['configurations']['hbase-log4j']['content']
-else:
-  log4j_props = None
-  
-hbase_env_sh_template = config['configurations']['hbase-env']['content']
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
-hbase_staging_dir = "/apps/hbase/staging"
-#for create_hdfs_directory
-hostname = config["hostname"]
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)

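The tail of params.py above uses a pattern that recurs throughout these stack scripts: functools.partial pre-binds the keyword arguments shared by every HdfsDirectory call, so call sites only pass what varies. A minimal self-contained illustration of the same idea (the function below is a stand-in, not the real HdfsDirectory resource):

    import functools

    def make_hdfs_dir(path, conf_dir, hdfs_user):
        # Stand-in for the HdfsDirectory resource; just reports its inputs.
        print('creating %s as %s with conf %s' % (path, hdfs_user, conf_dir))

    # Bind the arguments common to every call once...
    HdfsDirectory = functools.partial(make_hdfs_dir,
                                      conf_dir='/etc/hadoop/conf',
                                      hdfs_user='hdfs')

    # ...so each call site only names the directory.
    HdfsDirectory('/apps/hbase/staging')
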
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index 15a306b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    output_file = "/apps/hbase/data/ambarismoketest"
-    test_cmd = format("fs -test -e {output_file}")
-    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
-    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
-  
-    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
-      content = StaticFile("hbaseSmokeVerify.sh"),
-      mode = 0755
-    )
-  
-    File( hbase_servicecheck_file,
-      mode = 0755,
-      content = Template('hbase-smoke.sh.j2')
-    )
-    
-    if params.security_enabled:    
-      hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
-      grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}")
-  
-      File( hbase_grant_premissions_file,
-        owner   = params.hbase_user,
-        group   = params.user_group,
-        mode    = 0644,
-        content = Template('hbase_grant_permissions.j2')
-      )
-      
-      Execute( grantprivelegecmd,
-        user = params.hbase_user,
-      )
-
-    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
-  
-    Execute( servicecheckcmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-  
-    Execute ( smokeverifycmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-    
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()
-  

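The service check above relies on Execute's tries/try_sleep arguments to ride out a slow-starting cluster rather than failing on the first attempt. Outside the framework, the equivalent retry loop is roughly this (a sketch, not the Ambari API):

    import subprocess
    import time

    def run_with_retries(cmd, tries=3, try_sleep=5):
        # Retry a shell command, sleeping between attempts, like
        # Execute(..., tries=3, try_sleep=5).
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError('command failed after %d tries: %s' % (tries, cmd))
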
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/status_params.py
deleted file mode 100644
index 850ec8b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
-hbase_user = config['configurations']['hbase-env']['hbase_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
deleted file mode 100644
index 0513104..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
deleted file mode 100644
index 55e8461..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ /dev/null
@@ -1,80 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8656
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8656

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase-smoke.sh.j2
deleted file mode 100644
index 458da95..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase-smoke.sh.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
-scan 'ambarismoketest'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100644
index 38f9721..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_grant_permissions.j2
deleted file mode 100644
index 3378983..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,39 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100644
index a93c36c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100644
index 7097481..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/regionservers.j2
deleted file mode 100644
index fc6cc37..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/templates/regionservers.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
\ No newline at end of file

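Templates such as regionservers.j2 above are ordinary Jinja2; Ambari renders them with values from params. The loop can be exercised directly with the jinja2 package (host names below are made up):

    from jinja2 import Template

    template = Template("{% for host in rs_hosts %}{{host}}\n{% endfor %}")
    # Prints one region server per line, as the regionservers file expects.
    print(template.render(rs_hosts=['rs1.example.com', 'rs2.example.com']))
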
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 3dcff6d..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,180 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for HDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <property>
-    <name>ipc.server.tcpnodelay</name>
-    <value>true</value>
-    <description>Turn on/off Nagle's algorithm for the TCP socket
-      connection on
-      the server. Setting to true disables the algorithm and may
-      decrease latency
-      with a cost of more/smaller packets.
-    </description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
-    </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translation rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-</configuration>

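The auth_to_local rules deleted above all follow the same three-step shape: build a string from the principal's components, filter it with a regex, then rewrite it with a sed-style substitution. A rough Python rendering of the first rule, RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/, applied to an example principal (illustrative only; Hadoop's real evaluator is the KerberosName class):

    import re

    principal = 'rm/host1.example.com@EXAMPLE.COM'    # two-component principal
    name, realm = principal.split('@')
    components = name.split('/')                      # ['rm', 'host1.example.com']

    built = '%s@%s' % (components[0], realm)          # [2:$1@$0] -> 'rm@EXAMPLE.COM'
    if re.match(r'[rn]m@.*', built):                  # filter: ([rn]m@.*)
        print(re.sub(r'.*', 'yarn', built, count=1))  # s/.*/yarn/ -> 'yarn'
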
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-env.xml
deleted file mode 100644
index ad59361..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-env.xml
+++ /dev/null
@@ -1,200 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_root_logger</name>
-    <value>INFO,RFA</value>
-    <description>Hadoop Root Logger</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <property-type>GROUP</property-type>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <property-type>USER</property-type>
-    <description>User to run HDFS as</description>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir.mount.file</name>
-    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
-    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
-  </property>
-
-  <!-- hadoop-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hadoop-env.sh file</description>
-    <value>
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop home directory
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-{# this is different for PHD1 #}
-# Path to jsvc required by secure PHD 2.0 datanode
-export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER={{hdfs_user}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS={{mapreduce_libs_path}}
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-if [ -d "/usr/lib/tez" ]; then
-  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
-fi
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
-    </value>
-  </property>
-  
-</configuration>
\ No newline at end of file

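The hadoop-env.sh template above assembles HADOOP_CLASSPATH by globbing connector jars out of /usr/share/java. The same assembly in Python, using the paths the template uses (a sketch of the shell loops, not Ambari code):

    import glob

    # Pick up any MySQL or Oracle JDBC connector jars, as the two
    # for-loops in hadoop-env.sh do.
    java_jdbc_libs = ''
    for jar_file in sorted(glob.glob('/usr/share/java/*mysql*') +
                           glob.glob('/usr/share/java/*ojdbc*')):
        java_jdbc_libs += ':' + jar_file
    print('classpath suffix:' + java_jdbc_libs)
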
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 41bde16..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

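Every ACL value in hadoop-policy.xml above follows one grammar: a comma-separated user list and a comma-separated group list separated by a single blank, with "*" meaning all users. A tiny parser for that format (a sketch; Hadoop's own parsing lives in AccessControlList):

    def parse_acl(value):
        # "alice,bob users,wheel" -> (['alice', 'bob'], ['users', 'wheel'])
        value = value.strip()
        if value == '*':
            return ['*'], ['*']
        parts = value.split(' ', 1)
        users = [u for u in parts[0].split(',') if u]
        groups = [g for g in parts[1].split(',') if g] if len(parts) > 1 else []
        return users, groups

    print(parse_acl('alice,bob users,wheel'))
    print(parse_acl('hadoop'))  # users only, no group list
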

[02/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py
deleted file mode 100644
index 33ed8b1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import subprocess
-import json
-
-RESOURCEMANAGER = 'rm'
-NODEMANAGER = 'nm'
-HISTORYSERVER = 'hs'
-
-STARTED_STATE = 'STARTED'
-RUNNING_STATE = 'RUNNING'
-
-#Return response for given path and address
-def getResponse(path, address, ssl_enabled):
-
-  command = "curl"
-  httpGssnegotiate = "--negotiate"
-  userpswd = "-u:"
-  insecure = "-k"# This is smoke test, no need to check CA of server
-  if ssl_enabled:
-    url = 'https://' + address + path
-  else:
-    url = 'http://' + address + path
-      
-  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
-
-  proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-  (stdout, stderr) = proc.communicate()
-  response = json.loads(stdout)
-  if response == None:
-    print 'There is no response for url: ' + str(url)
-    raise Exception('There is no response for url: ' + str(url))
-  return response
-
-#Verify that REST api is available for given component
-def validateAvailability(component, path, addresses, ssl_enabled):
-  responses = {}
-  for address in addresses.split(','):
-    try:
-      responses[address] = getResponse(path, address, ssl_enabled)
-    except Exception as e:
-      print 'Error checking availability status of component.', e
-
-  if not responses:
-    exit(1)
-
-  is_valid = validateAvailabilityResponse(component, responses.values()[0])
-  if not is_valid:
-    exit(1)
-
-#Validate component-specific response
-def validateAvailabilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      rm_state = response['clusterInfo']['state']
-      if rm_state == STARTED_STATE:
-        return True
-      else:
-        print 'Resourcemanager is not started'
-        return False
-
-    elif component == NODEMANAGER:
-      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
-      if node_healthy:
-        return True
-      else:
-        return False
-    elif component == HISTORYSERVER:
-      hs_start_time = response['historyInfo']['startedOn']
-      if hs_start_time > 0:
-        return True
-      else:
-        return False
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating availability response for ' + str(component), e
-    return False
-
-# Verify that the component has the resources required to work
-def validateAbility(component, path, addresses, ssl_enabled):
-  responses = {}
-  for address in addresses.split(','):
-    try:
-      responses[address] = getResponse(path, address, ssl_enabled)
-    except Exception as e:
-      print 'Error checking ability of component.', e
-
-  if not responses:
-    exit(1)
-
-  is_valid = validateAbilityResponse(component, responses.values()[0])
-  if not is_valid:
-    exit(1)
-
-# Validate from the component-specific response that the component has the resources required to work
-def validateAbilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      nodes = []
-      if 'nodes' in response and response['nodes'] is not None and 'node' in response['nodes']:
-        nodes = response['nodes']['node']
-      connected_nodes_count = len(nodes)
-      if connected_nodes_count == 0:
-        print 'There are no nodemanagers connected to the resourcemanager'
-        return False
-      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
-      active_nodes_count = len(active_nodes)
-
-      if active_nodes_count == 0:
-        print 'There are no active nodemanagers connected to the resourcemanager'
-        return False
-      else:
-        return True
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating ability response', e
-    return False
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
-  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
-
-  (options, args) = parser.parse_args()
-
-  if not args:
-    parser.error("Component argument is required")
-  component = args[0]
-
-  address = options.address
-  ssl_enabled = (options.ssl_enabled == 'true')  # -s must be exactly 'true' to enable SSL
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/info'
-  elif component == NODEMANAGER:
-    path = '/ws/v1/node/info'
-  elif component == HISTORYSERVER:
-    path = '/ws/v1/history/info'
-  else:
-    parser.error("Invalid component")
-
-  validateAvailability(component, path, address, ssl_enabled)
-
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/nodes'
-    validateAbility(component, path, address, ssl_enabled)
-
-if __name__ == "__main__":
-  main()
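
For reference, the removed service_check.py later in this diff drives this
script from the YARN smoke test; a minimal stand-alone sketch of the same
invocation, with a hypothetical ResourceManager address, is:

    import subprocess, sys
    # 'rm' selects the ResourceManager checks; -p takes host:port and
    # -s takes 'true'/'false' to toggle SSL
    rc = subprocess.call([sys.executable, "validateYarnComponentStatus.py",
                          "rm", "-p", "rm-host.example.com:8088", "-s", "false"])
    # rc == 0 means the REST API reported a started RM with live nodemanagers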

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/__init__.py
deleted file mode 100644
index 35de4bb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/application_timeline_server.py
deleted file mode 100644
index c33110f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/application_timeline_server.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from yarn import yarn
-from service import service
-
-class ApplicationTimelineServer(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    #self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name='apptimelineserver')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('historyserver', action='start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    service('historyserver', action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    Execute(format("mv {yarn_historyserver_pid_file_old} {yarn_historyserver_pid_file}"),
-            only_if = format("test -e {yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
-    functions.check_process_status(status_params.yarn_historyserver_pid_file)
-
-if __name__ == "__main__":
-  ApplicationTimelineServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/historyserver.py
deleted file mode 100644
index 4184dc4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/historyserver.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-class HistoryServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name="historyserver")
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('historyserver', action='start', serviceName='mapreduce')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    service('historyserver', action='stop', serviceName='mapreduce')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.mapred_historyserver_pid_file)
-
-if __name__ == "__main__":
-  HistoryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapred_service_check.py
deleted file mode 100644
index f12ea61..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapred_service_check.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class MapReduce2ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
-    input_file = format("/user/{smokeuser}/mapredsmokeinput")
-    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
-
-    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
-    create_file_cmd = format("fs -put /etc/passwd {input_file}")
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
-
-    log_dir = format("{mapred_log_dir_prefix}/{smokeuser}")
-    Directory(log_dir, owner=params.smokeuser, recursive=True)
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-
-      Execute(kinit_cmd,
-              user=params.smokeuser
-      )
-
-    ExecuteHadoop(cleanup_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-    ExecuteHadoop(create_file_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir,
-                  logoutput=True
-    )
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-if __name__ == "__main__":
-  MapReduce2ServiceCheck().execute()
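
The check above boils down to four hadoop invocations. Expressed directly,
and assuming a hypothetical smoke user 'ambari-qa' with the examples jar at
its non-RPM default location, it is roughly:

    import subprocess
    u = "ambari-qa"  # hypothetical smokeuser
    for cmd in [
        "hadoop fs -rm -r -f /user/%s/mapredsmokeoutput /user/%s/mapredsmokeinput" % (u, u),
        "hadoop fs -put /etc/passwd /user/%s/mapredsmokeinput" % u,
        "hadoop jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-2.*.jar"
        " wordcount /user/%s/mapredsmokeinput /user/%s/mapredsmokeoutput" % (u, u),
        "hadoop fs -test -e /user/%s/mapredsmokeoutput" % u,
    ]:
        subprocess.check_call(cmd, shell=True)  # the shell expands the jar glob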

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapreduce2_client.py
deleted file mode 100644
index 831e5e8..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/mapreduce2_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-
-class MapReduce2Client(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  MapReduce2Client().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/nodemanager.py
deleted file mode 100644
index 8e153e0..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/nodemanager.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-class Nodemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name="nodemanager")
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('nodemanager',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('nodemanager',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nodemanager_pid_file)
-
-if __name__ == "__main__":
-  Nodemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/params.py
deleted file mode 100644
index 4f0d98f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/params.py
+++ /dev/null
@@ -1,174 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  hadoop_libexec_dir = "/usr/phd/current/hadoop-client/libexec"
-  hadoop_bin = "/usr/phd/current/hadoop-client/sbin"
-  hadoop_bin_dir = "/usr/phd/current/hadoop-client/bin"
-  hadoop_yarn_home = '/usr/phd/current/hadoop-yarn-client'
-  hadoop_mapred2_jar_location = '/usr/phd/current/hadoop-mapreduce-client'
-  mapred_bin = '/usr/phd/current/hadoop-mapreduce-client/sbin'
-  yarn_bin = '/usr/phd/current/hadoop-yarn-client/sbin'
-  yarn_container_bin = '/usr/phd/current/hadoop-yarn-client/bin'
-else:
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-  hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-  mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-  yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-  yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-limits_conf_dir = "/etc/security/limits.d"
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
-
-ulimit_cmd = "ulimit -c unlimited;"
-
-mapred_user = status_params.mapred_user
-yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-rm_hosts = config['clusterHostInfo']['rm_host']
-rm_host = rm_hosts[0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = "8090"
-# TODO UPGRADE default, update site during upgrade
-rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-
-java64_home = config['hostLevelParams']['java_home']
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-
-yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
-apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
-ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
-yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
-yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
-mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
-mapred_env_sh_template = config['configurations']['mapred-env']['content']
-yarn_env_sh_template = config['configurations']['yarn-env']['content']
-
-if len(rm_hosts) > 1:
-  additional_rm_host = rm_hosts[1]
-  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
-else:
-  rm_webui_address = format("{rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
-hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
-
-nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
-nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
-
-distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-yarn_pid_dir = status_params.yarn_pid_dir
-mapred_pid_dir = status_params.mapred_pid_dir
-
-mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
-yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
-mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
-yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-
-hostname = config['hostname']
-
-ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
-has_ats = len(ats_host) > 0
-
-if security_enabled:
-  _rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
-  _rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
-  _rm_principal_name = _rm_principal_name.replace('_HOST',hostname.lower())
-  
-  rm_kinit_cmd = format("{kinit_path_local} -kt {_rm_keytab} {_rm_principal_name};")
-
-  # YARN timeline security options are only available in PHD Champlain
-  if has_ats:
-    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
-    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
-    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
-    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
-else:
-  rm_kinit_cmd = ""
-  yarn_timelineservice_kinit_cmd = ""
-
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
-jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
-
-#for create_hdfs_directory
-hostname = config["hostname"]
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
-
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-min_user_id = config['configurations']['yarn-env']['min_user_id']
-
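
The functools.partial block above pre-binds the arguments shared by every
HdfsDirectory call site so that callers only pass what varies. A
self-contained sketch of the same pattern, with a stand-in function since
HdfsDirectory comes from resource_management, is:

    import functools

    def make_dir(path, action=None, owner=None, conf_dir=None, hdfs_user=None):
        # stand-in for the real HdfsDirectory resource
        print("%s %s as %s (conf=%s)" % (action, path, owner, conf_dir))

    # bind the arguments that never vary across call sites
    MakeDir = functools.partial(make_dir, conf_dir="/etc/hadoop/conf", hdfs_user="hdfs")
    MakeDir("/mapred", action="create_delayed", owner="mapred")
    MakeDir("/mapred/system", action="create_delayed", owner="hdfs")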

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/resourcemanager.py
deleted file mode 100644
index 4d40d68..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/resourcemanager.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-
-class Resourcemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    yarn(name='resourcemanager')
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('resourcemanager',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.resourcemanager_pid_file)
-
-  def refreshqueues(self, env):
-    import params
-
-    self.configure(env)
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='refreshQueues'
-    )
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    rm_kinit_cmd = params.rm_kinit_cmd
-    yarn_user = params.yarn_user
-    conf_dir = params.hadoop_conf_dir
-    user_group = params.user_group
-
-    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         group=user_group
-    )
-
-    # update_exclude_file_only arrives as a string in commandParams, so
-    # compare against "true" rather than a boolean
-    if str(params.update_exclude_file_only).lower() != "true":
-      Execute(yarn_refresh_cmd,
-              environment={'PATH': params.execute_path},
-              user=yarn_user)
-
-
-if __name__ == "__main__":
-  Resourcemanager().execute()
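
As a worked example, with the non-RPM defaults from params.py earlier in this
diff and security disabled (so rm_kinit_cmd is empty), the yarn_refresh_cmd
built above expands to:

    yarn --config /etc/hadoop/conf rmadmin -refreshNodes

run as the yarn user once the exclude file has been rewritten; with security
enabled it is prefixed by the kinit command assembled in params.py.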

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service.py
deleted file mode 100644
index 466f637..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-
-def service(componentName, action='start', serviceName='yarn'):
-
-  import params
-
-  if serviceName == 'mapreduce' and componentName == 'historyserver':
-    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
-    usr = params.mapred_user
-  else:
-    daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
-    usr = params.yarn_user
-
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
-    check_process = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-
-    # Remove the pid file if its corresponding process is not running.
-    File(pid_file,
-         action="delete",
-         not_if=check_process)
-
-    # Attempt to start the process. Internally, this is skipped if the process is already running.
-    Execute(daemon_cmd,
-            user=usr,
-            not_if=check_process
-    )
-
-    # Ensure that the process with the expected PID exists.
-    Execute(check_process,
-            user=usr,
-            not_if=check_process,
-            initial_wait=5
-    )
-
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {componentName}")
-    Execute(daemon_cmd,
-            user=usr)
-
-    File(pid_file,
-         action="delete")
-
-  elif action == 'refreshQueues':
-    refresh_cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
-
-    Execute(refresh_cmd,
-            user=usr,
-    )
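
As a worked example, starting a nodemanager with the non-RPM defaults from
params.py earlier in this diff executes, as the yarn user:

    ulimit -c unlimited; export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf start nodemanager

guarded by the check_process test, so the daemon is only launched when the
yarn-<yarn_user>-nodemanager.pid file does not already point at a live process.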

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service_check.py
deleted file mode 100644
index 7189664..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/service_check.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    run_yarn_check_cmd = format("yarn --config {hadoop_conf_dir} node -list")
-
-    component_type = 'rm'
-    if params.hadoop_ssl_enabled:
-      component_address = params.rm_webui_https_address
-    else:
-      component_address = params.rm_webui_address
-
-    validateStatusFileName = "validateYarnComponentStatus.py"
-    validateStatusFilePath = format("{tmp_dir}/{validateStatusFileName}")
-    python_executable = sys.executable
-    validateStatusCmd = format("{python_executable} {validateStatusFilePath} {component_type} -p {component_address} -s {hadoop_ssl_enabled}")
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName),
-         mode=0755
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            user=params.smokeuser,
-            logoutput=True
-    )
-
-    Execute(run_yarn_check_cmd,
-            path=params.execute_path,
-            user=params.smokeuser
-    )
-
-if __name__ == "__main__":
-  ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/status_params.py
deleted file mode 100644
index 498a885..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/status_params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
-mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
-yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
-yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
-mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn.py
deleted file mode 100644
index df3a9d2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-
-def yarn(name=None):
-  import params
-
-  if name in ["nodemanager", "historyserver"]:
-    if params.yarn_log_aggregation_enabled:
-      params.HdfsDirectory(params.yarn_nm_app_log_dir,
-                           action="create_delayed",
-                           owner=params.yarn_user,
-                           group=params.user_group,
-                           mode=0777,
-                           recursive_chmod=True
-      )
-    params.HdfsDirectory("/mapred",
-                         action="create_delayed",
-                         owner=params.mapred_user
-    )
-    params.HdfsDirectory("/mapred/system",
-                         action="create_delayed",
-                         owner=params.hdfs_user
-    )
-    params.HdfsDirectory(params.mapreduce_jobhistory_intermediate_done_dir,
-                         action="create_delayed",
-                         owner=params.mapred_user,
-                         group=params.user_group,
-                         mode=0777
-    )
-
-    params.HdfsDirectory(params.mapreduce_jobhistory_done_dir,
-                         action="create_delayed",
-                         owner=params.mapred_user,
-                         group=params.user_group,
-                         mode=01777
-    )
-    params.HdfsDirectory(None, action="create")
-
-  if name == "nodemanager":
-    Directory(params.nm_local_dirs.split(',') + params.nm_log_dirs.split(','),
-              owner=params.yarn_user,
-              recursive=True,
-              ignore_failures=True,
-              )
-
-  Directory([params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  Directory([params.mapred_pid_dir, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            recursive=True
-  )
-  Directory([params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            recursive=True,
-            ignore_failures=True,
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  if name == 'resourcemanager':
-    File(params.yarn_job_summary_log,
-       owner=params.yarn_user,
-       group=params.user_group
-    )
-  elif name == 'apptimelineserver':
-    Directory(params.ats_leveldb_dir,
-       owner=params.yarn_user,
-       group=params.user_group,
-       recursive=True
-    )
-
-  File(params.rm_nodes_exclude_path,
-       owner=params.yarn_user,
-       group=params.user_group
-  )
-
-  File(format("{limits_conf_dir}/yarn.conf"),
-       mode=0644,
-       content=Template('yarn.conf.j2')
-  )
-
-  File(format("{limits_conf_dir}/mapreduce.conf"),
-       mode=0644,
-       content=Template('mapreduce.conf.j2')
-  )
-
-  File(format("{hadoop_conf_dir}/yarn-env.sh"),
-       owner=params.yarn_user,
-       group=params.user_group,
-       mode=0755,
-       content=InlineTemplate(params.yarn_env_sh_template)
-  )
-
-  if params.security_enabled:
-    container_executor = format("{yarn_container_bin}/container-executor")
-    File(container_executor,
-         group=params.yarn_executor_container_group,
-         mode=06050
-    )
-
-    File(format("{hadoop_conf_dir}/container-executor.cfg"),
-         group=params.user_group,
-         mode=0644,
-         content=Template('container-executor.cfg.j2')
-    )
-
-
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  File(format("{hadoop_conf_dir}/mapred-env.sh"),
-       owner=tc_owner,
-       content=InlineTemplate(params.mapred_env_sh_template)
-  )
-
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
-    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(
-    os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(
-    os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
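
(A note on the mode arguments above: they are Python 2 octal literals. 0777
and 01777 create world-writable directories, the latter with the sticky bit
as on /tmp, while 06050 gives container-executor and task-controller the
setuid/setgid bits with read/execute limited to the configured group.)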

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn_client.py
deleted file mode 100644
index f6b4b44..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/scripts/yarn_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-
-class YarnClient(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  YarnClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/container-executor.cfg.j2
deleted file mode 100644
index c6f1ff6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/container-executor.cfg.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-yarn.nodemanager.local-dirs={{nm_local_dirs}}
-yarn.nodemanager.log-dirs={{nm_log_dirs}}
-yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
-banned.users=hdfs,yarn,mapred,bin
-min.user.id={{min_user_id}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index c7ce416..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file
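
Rendered with a hypothetical exclude_hosts of ['host1.example.com',
'host2.example.com'], the template above emits one decommissioned hostname
per line:

    host1.example.com
    host2.example.com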

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/mapreduce.conf.j2
deleted file mode 100644
index b996645..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/mapreduce.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{mapred_user}}   - nofile 32768
-{{mapred_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/taskcontroller.cfg.j2
deleted file mode 100644
index 3d5f4f2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/yarn.conf.j2
deleted file mode 100644
index 3bd7a45..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/templates/yarn.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{yarn_user}}   - nofile 32768
-{{yarn_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
deleted file mode 100644
index 4c305b0..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <property-type>USER</property-type>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-  
-  <!-- zookeeper-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for zookeeper-env.sh file</description>
-    <value>
-export JAVA_HOME={{java64_home}}
-export ZOOKEEPER_HOME={{zk_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}
-    </value>
-  </property>
-</configuration>

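In configuration XMLs like the one above, each property pairs a name with a value, including whole-file Jinja templates carried in the "content" property. A minimal sketch of collapsing such a file into a Python dict with the standard library, assuming well-formed input (illustrative only, not the parser Ambari itself uses):

import xml.etree.ElementTree as ET

def load_config_xml(path):
    """Collapse an Ambari-style configuration XML into a {name: value} dict."""
    root = ET.parse(path).getroot()
    return {prop.findtext("name"): prop.findtext("value")
            for prop in root.findall("property")}

# e.g. load_config_xml("zookeeper-env.xml")["zk_log_dir"] -> "/var/log/zookeeper"
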
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
deleted file mode 100644
index 6fcf5bc..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
+++ /dev/null
@@ -1,101 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=zookeeper.log
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=zookeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index f304c83..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <displayName>ZooKeeper</displayName>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.5.phd.3.0.0.0</version>
-      <components>
-
-        <component>
-          <name>ZOOKEEPER_SERVER</name>
-          <displayName>ZooKeeper Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZOOKEEPER_CLIENT</name>
-          <displayName>ZooKeeper Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>zookeeper-env.sh</fileName>
-              <dictionaryName>zookeeper-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>zookeeper-log4j</dictionaryName>
-            </configFile>            
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>zookeeper</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>zookeeper-log4j</config-type>
-        <config-type>zookeeper-env</config-type>
-        <config-type>zoo.cfg</config-type>
-      </configuration-dependencies>
-      <restartRequiredAfterChange>true</restartRequiredAfterChange>
-    </service>
-  </services>
-</metainfo>

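The metainfo.xml above declares the service's components along with their categories (MASTER/CLIENT), cardinalities, and command scripts. A minimal sketch of listing those component declarations with the standard library (illustrative only; Ambari's real stack parser does far more):

import xml.etree.ElementTree as ET

def list_components(path):
    """Print name/category/cardinality for each declared component."""
    root = ET.parse(path).getroot()
    for comp in root.iter("component"):
        print(comp.findtext("name"),
              comp.findtext("category"),
              comp.findtext("cardinality"))

# For the file above this prints:
#   ZOOKEEPER_SERVER MASTER 1+
#   ZOOKEEPER_CLIENT CLIENT 1+
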
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkEnv.sh
deleted file mode 100644
index fa1b832..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"

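zkEnv.sh resolves the config directory in a fixed order: an explicit $ZOOCFGDIR wins, then /etc/zookeeper, then the conf directory next to the bin directory. The same fallback expressed as a small Python sketch (a hypothetical helper, assuming the same environment variables):

import os

def resolve_zoocfgdir(zoobindir, env=os.environ):
    """Mirror zkEnv.sh: $ZOOCFGDIR, then /etc/zookeeper, then ../conf."""
    if env.get("ZOOCFGDIR"):
        return env["ZOOCFGDIR"]
    if os.path.isdir("/etc/zookeeper"):
        return "/etc/zookeeper"
    return os.path.join(zoobindir, "..", "conf")
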
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkServer.sh
deleted file mode 100644
index dd75a58..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this script is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   according to the docs they are not necessary, but otherwise jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac

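The status) branch above probes the server with ZooKeeper's four-letter "stat" command over a plain TCP connection and greps for the Mode: line. A minimal Python sketch of the same probe, assuming default host/port and that the short stat response arrives in a single read:

import socket

def zk_mode(host="localhost", port=2181, timeout=5.0):
    """Send the four-letter 'stat' command; return the 'Mode:' line,
    or None if the server is unreachable."""
    try:
        with socket.create_connection((host, port), timeout=timeout) as sock:
            sock.sendall(b"stat")
            data = sock.recv(4096).decode("utf-8", "replace")
    except OSError:
        return None
    for line in data.splitlines():
        if line.startswith("Mode:"):
            return line
    return None
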
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkService.sh
deleted file mode 100644
index 6e167a4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-su -s /bin/bash - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkSmoke.sh
deleted file mode 100644
index f31a6ea..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/files/zkSmoke.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smoke_script=$1
-smoke_user=$2
-conf_dir=$3
-client_port=$4
-security_enabled=$5
-kinit_path_local=$6
-smoke_user_keytab=$7
-export ZOOKEEPER_EXIT_CODE=0
-test_output_file=/tmp/zkSmoke.out
-errors_expr="ERROR|Exception"
-acceptable_expr="SecurityException"
-zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
-  su -s /bin/bash - $smoke_user -c "$kinitcmd"
-fi
-
-function verify_output() {
-  if [ -f $test_output_file ]; then
-    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
-    if [ "$?" -eq 0 ]; then
-      echo "Error found in the zookeeper smoke test. Exiting."
-      echo $errors
-      exit 1
-    fi
-  fi
-}
-
-# Delete /zk_smoketest znode if exists
-su -s /bin/bash - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" >$test_output_file 2>&1
-# Create /zk_smoketest znode on one zookeeper server
-su -s /bin/bash - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" >>$test_output_file 2>&1
-verify_output
-
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  su -s /bin/bash - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
-  su -s /bin/bash - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
-  output=$(su -s /bin/bash - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-su -s /bin/bash - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE

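The zkhosts pipeline at the top of zkSmoke.sh extracts the quorum hostnames from the server.N=host:port:port lines in zoo.cfg. The same extraction as a Python sketch (a hypothetical helper, same file format assumed):

import re

def quorum_hosts(zoo_cfg_path):
    """Return the host part of every server.N=host:port[:port] line."""
    hosts = []
    with open(zoo_cfg_path) as fh:
        for line in fh:
            m = re.match(r"server\.\d+\s*=\s*([^:\s]+)", line.strip())
            if m:
                hosts.append(m.group(1))
    return hosts
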
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/__init__.py
deleted file mode 100644
index 35de4bb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""


[07/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index c1a792c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,166 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if check_cpu_on %}
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_cpu.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-define command {
-        command_name    check_cpu_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- php $USER1$/check_cpu_ha.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -u $ARG9$
-       }
-{% endif %}
-
-# Check whether DataNode storage is full
-define command {
-        command_name    check_datanode_storage
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_blocks.php -h ^^ -p $ARG2$ -s $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_capacity.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_rpcq_latency_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- php $USER1$/check_rpcq_latency_ha.php -h ^^ -p $ARG3$ -n $ARG2$ -w $ARG4$ -c $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -s $ARG10$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_webui_ha
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG2$ -- $USER1$/check_webui_ha.sh $ARG1$ ^^ $ARG3$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
-
-define command{
-        command_name check_tcp_wrapper
-        command_line  /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $HOSTADDRESS$ -- $USER1$/check_tcp -H ^^ -p $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name check_checkpoint_time
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_checkpoint_time.py -H ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -t $ARG5$ -x $ARG6$ -s $ARG7$
-       }
-
-define command{
-        command_name check_tcp_wrapper_sasl
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $HOSTADDRESS$ -- /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_hive_thrift_port.py -H ^^ -p $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name check_ambari
-        command_line /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- /var/lib/ambari-agent/ambari-python-wrap $USER1$/check_ambari_alerts.py -H $HOSTADDRESS$ -f $ARG1$ -n $ARG2$
-       }

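Each command_line above is a template: Nagios substitutes $HOSTADDRESS$ from the host being checked and $ARG1$..$ARGn$ from the '!'-separated fields of a service's check_command. A toy illustration of that macro expansion (not Nagios code; $USER1$ and similar resource macros are left alone):

def expand_command(command_line, host, args):
    """Substitute $HOSTADDRESS$ and $ARGn$ macros the way Nagios does when
    it builds a check from a '!'-separated check_command."""
    macros = {"HOSTADDRESS": host}
    macros.update({"ARG%d" % i: arg for i, arg in enumerate(args, 1)})
    for name, value in macros.items():
        command_line = command_line.replace("$%s$" % name, value)
    return command_line

# expand_command("$USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$",
#                "nn-host", ["namenode", "50070"])
# -> "$USER1$/check_webui.sh namenode nn-host 50070"
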
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index 05c1252..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index 8bcc980..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,53 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for host in all_hosts %}
-define host {
-        alias                     {{host}}
-        host_name                 {{host}}
-        use                       {{host_template}}
-        address                   {{host}}
-        check_command             check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        check_interval            0.25
-        retry_interval            0.25
-        max_check_attempts        4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index aee9b15..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,113 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-
-{% if hostgroup_defs['namenode'] or 
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-  {% if hostgroup_defs['namenode'] != None %}
-  define servicegroup {
-    servicegroup_name  HDFS
-    alias  HDFS Checks
-  }
-  {% endif %}
-{% endif %} 
-{%if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2']-%}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-{%if hostgroup_defs['resourcemanager'] or
-  hostgroup_defs['nodemanagers'] %}
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-{% endif %}
-{%if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] or hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  HIVE
-  alias  HIVE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}
-{% if hostgroup_defs['nimbus'] or
-  hostgroup_defs['drpc-server'] or
-  hostgroup_defs['storm_ui'] or
-  hostgroup_defs['supervisors'] or
-  hostgroup_defs['storm_rest_api']%}
-define servicegroup {
-  servicegroup_name  STORM
-  alias  STORM Checks
-}
-{% endif %}
-{% if hostgroup_defs['falcon-server'] %}
-define servicegroup {
-  servicegroup_name  FALCON
-  alias  FALCON Checks
-}
-{% endif %}
-
-{%if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index f278260..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,791 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-{% if hostgroup_defs['namenode'] != None %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{# used only for HDP2 #}
-{% if hostgroup_defs['namenode'] and hostgroup_defs['namenode'] != None and dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::NameNode HA Healthy
-        servicegroups           HDFS
-        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      5
-}
-{% endif %}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name	        {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp_wrapper!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['namenode'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_rm_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-define service {
-        hostgroup_name	        ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp_wrapper!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-{% endif %}
-
-
-{% if hostgroup_defs['snamenode'] and hostgroup_defs['namenode'] != None %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ snamenode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_ui'] %}
-# STORM UI Checks
-define service {
-        hostgroup_name          storm_ui
-        use                     hadoop-service
-        service_description     STORM_UI_SERVER::Storm UI on {{ hostgroup_defs['storm_ui'][0] }}
-        servicegroups           STORM
-        check_command           check_webui!storm_ui!{{ storm_ui_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_ui'] %}
-# STORM UI Checks
-define service {
-        hostgroup_name          storm_ui
-        use                     hadoop-service
-        service_description     STORM_UI_SERVER::Storm UI Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ storm_ui_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['nimbus'] %}
-# Nimbus Checks
-define service {
-        hostgroup_name          nimbus
-        use                     hadoop-service
-        service_description     NIMBUS::Nimbus process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ nimbus_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['drpc-server'] %}
-# drpc Checks
-define service {
-        hostgroup_name          drpc-server
-        use                     hadoop-service
-        service_description     DRPC_SERVER::DRPC Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ drpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['storm_rest_api'] %}
-# Storm REST API Checks
-define service {
-        hostgroup_name          storm_rest_api
-        use                     hadoop-service
-        service_description     STORM_REST_API::Storm REST API Server process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ storm_rest_api_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER Supervisor Checks
-{% if hostgroup_defs['supervisors'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     SUPERVISOR::Percent Supervisors live
-        servicegroups           STORM
-        check_command           check_aggregate!"SUPERVISOR::Supervisors process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          supervisors
-        use                     hadoop-service
-        service_description     SUPERVISOR::Supervisors process
-        servicegroups           STORM
-        check_command           check_tcp_wrapper!{{ supervisor_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['namenode'] %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_cpu!{{ namenode_port }}!200%!250%!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{nn_ha_host_port_map[namenode_hostname]}}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        host_name               {{namenode_host[0]}}
-        use                     hadoop-service
-        service_description     NAMENODE::Last checkpoint time
-        servicegroups           HDFS
-        check_command           check_checkpoint_time!{{ nn_hosts_string }}!{{ namenode_port }}!200!200!{{ dfs_namenode_checkpoint_period }}!{{dfs_namenode_checkpoint_txns}}!{{str(hdfs_ssl_enabled).lower()}}
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!{{ nn_metrics_property }}!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1
-        max_check_attempts      1
-}
-
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-# YARN::RESOURCEMANAGER Checks 
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui_ha!resourcemanager!{{ rm_hosts_in_str }}!{{ rm_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-        check_command           check_cpu_ha!{{ rm_hosts_in_str }}!{{ rm_port }}!200%!250%!{{ str(yarn_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency_ha!{{ rm_hosts_in_str }}!ResourceManager!{{ rm_port }}!3000!5000!{{ str(yarn_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-{%  for rm_host in _rm_host  %}
-define service {
-        host_name               {{ rm_host }}
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process on {{ rm_host }}
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ rm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endfor %}
-{%  endif %}
-
-{% if hostgroup_defs['nodemanagers'] %}
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ nm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(yarn_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!historyserver2!{{ hs_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!{{ hs_port }}!200%!250%!{{ str(mapreduce_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobHistoryServer!{{ hs_port }}!3000!5000!{{ str(mapreduce_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp_wrapper!{{ hs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{%  endif %}
-
-{% if hostgroup_defs['journalnodes'] %}
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{ journalnode_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['slaves'] and hostgroup_defs['namenode'] != None %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp_wrapper!{{datanode_port}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hdfs_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2 
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp_wrapper!{{ clientPort }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_rs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if check_cpu_on %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization
-        servicegroups           HBASE
-        check_command           check_cpu_ha!{{ hbase_master_hosts_in_str }}!{{ hbase_master_port }}!200%!250%!false!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{%  endif %}
-
-{%  for hbasemaster in hbase_master_hosts  %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp_wrapper!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper!{{ hive_metastore_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# HIVE Server check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-SERVER::HiveServer2 process
-        servicegroups           HIVE
-        check_command           check_tcp_wrapper_sasl!{{ hive_server_port }}!{{ '--security-enabled' if security_enabled else '' }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           HIVE
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-#FALCON checks
-{% if hostgroup_defs['falcon-server'] %}
-define service {
-        hostgroup_name          falcon-server
-        service_description     FALCON::Falcon Server process
-        servicegroups           FALCON
-        check_command           check_tcp_wrapper!{{ falcon_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          falcon-server
-        service_description     FALCON::Falcon Server Web UI
-        servicegroups           FALCON
-        check_command           check_webui!falconserver!{{ falcon_port }}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['ats-servers'] %}
-define service {
-        hostgroup_name          ats-servers
-        use                     hadoop-service
-        service_description     APP_TIMELINE_SERVER::App Timeline Server process
-        servicegroups           YARN
-        check_command           check_tcp_wrapper!{{ ahs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_ambari!/var/nagios/ambari.json!flume_agent
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-
-
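For reference, the Nagios definitions deleted above are Jinja2 templates: Ambari fills in ports and host lists before writing the final config file. A minimal sketch of that rendering step, assuming the jinja2 package is available (illustrative values only, not Ambari's actual rendering pipeline):

    from jinja2 import Template

    # One service block from the template above, rendered with a sample port.
    block = Template("""define service {
            hostgroup_name          zookeeper-servers
            use                     hadoop-service
            service_description     ZOOKEEPER::ZooKeeper Server process
            check_command           check_tcp_wrapper!{{ clientPort }}!-w 1 -c 1
    }""")

    print(block.render(clientPort=2181))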


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log
deleted file mode 100644
index 2010c02..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log
+++ /dev/null
@@ -1,29 +0,0 @@
-Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
-Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
-Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
-Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
-Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
-Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
-Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
-Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
-Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
-Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
-Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
-Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
-Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
-Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
-Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
-Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
-Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
-Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
-Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
-Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
-The cluster is balanced. Exiting...
-Balancing took 24.858033333333335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
deleted file mode 100644
index 0cce48c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import time
-import sys
-from threading import Thread
-
-
-def write_function(path, handle, interval):
-  with open(path) as f:
-      for line in f:
-          handle.write(line)
-          handle.flush()
-          time.sleep(interval)
-          
-thread = Thread(target =  write_function, args = ('balancer.log', sys.stdout, 1.5))
-thread.start()
-
-threaderr = Thread(target =  write_function, args = ('balancer-err.log', sys.stderr, 1.5 * 0.023))
-threaderr.start()
-
-thread.join()  
-
-
-def rebalancer_out():
-  write_function('balancer.log', sys.stdout, 1.5)
-
-def rebalancer_err():
-  write_function('balancer-err.log', sys.stderr, 1.5 * 0.023)
\ No newline at end of file
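A side note on the emulator above: it simply replays the canned balancer.log on a timer, so a caller can consume it exactly like real balancer output. A hedged sketch (assumes hdfs-command.py and the log files sit in the current directory):

    import subprocess

    # Read the replayed output line by line, the same pattern
    # namenode.py's rebalancehdfs uses against the real balancer.
    proc = subprocess.Popen(['python', 'hdfs-command.py'],
                            stdout=subprocess.PIPE,
                            universal_newlines=True)
    for line in iter(proc.stdout.readline, ''):
        print('[emulated balancer] %s' % line.rstrip())
    proc.stdout.close()
    proc.wait()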

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index c1a66fb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-from hdfs import hdfs
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs()
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()
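All of these component scripts follow the same shape: subclass Script, implement install/start/stop/configure/status, and let execute() route the agent's command to the matching method. Roughly (an illustration of the dispatch idea only, not resource_management's actual implementation):

    import sys

    class Script(object):
        # Illustrative dispatcher; the real Script also parses the command
        # JSON and sets up params, logging and structured output.
        def execute(self):
            command = sys.argv[1] if len(sys.argv) > 1 else 'status'
            method = getattr(self, command.lower(), None)
            if method is None:
                raise SystemExit("no such command: %s" % command)
            method(env=None)

    class DataNode(Script):
        def start(self, env):
            print("starting datanode")

    if __name__ == '__main__':
        DataNode().execute()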

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs.py
deleted file mode 100644
index 873aa15..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-
-def hdfs(name=None):
-  import params
-
-  # This directory may not exist on some systems, so create it before placing files in it
-  Directory(params.limits_conf_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(os.path.join(params.hadoop_conf_dir, 'slaves'),
-       owner=tc_owner,
-       content=Template("slaves.j2")
-  )
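XmlConfig above serializes a configuration dict into Hadoop's *-site.xml property format. A rough standard-library equivalent (illustrative; the real resource also handles configuration attributes, ownership and modes):

    import xml.etree.ElementTree as ET

    def write_hadoop_xml(path, configurations):
        # Render {'dfs.replication': '3', ...} as a <configuration> file.
        root = ET.Element('configuration')
        for name, value in sorted(configurations.items()):
            prop = ET.SubElement(root, 'property')
            ET.SubElement(prop, 'name').text = name
            ET.SubElement(prop, 'value').text = str(value)
        ET.ElementTree(root).write(path, encoding='utf-8', xml_declaration=True)

    write_hadoop_xml('hdfs-site.xml', {'dfs.replication': '3'})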

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index 3b4cf3e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs import hdfs
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def config(self, env):
-    import params
-    hdfs()
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index c93c6e4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
-from utils import service
-
-
-def create_dirs(data_dir, params):
-  """
-  :param data_dir: The directory to create
-  :param params: parameters
-  """
-  Directory(data_dir,
-            recursive=True,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            ignore_failures=True
-  )
-
-
-def datanode(action=None):
-  import params
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0751,
-              owner=params.hdfs_user,
-              group=params.user_group)
-
-    handle_dfs_data_dir(create_dirs, params)
-
-  elif action == "start" or action == "stop":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
\ No newline at end of file
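handle_dfs_data_dir above takes create_dirs as a callback and applies it to each configured DataNode data directory. Its contract is roughly the following (a sketch only, not the helper's real implementation, which also tolerates a configurable number of failed volumes):

    def handle_dfs_data_dir(func, params):
        # Apply func to every comma-separated entry of dfs.datanode.data.dir.
        for data_dir in params.dfs_data_dir.split(','):
            func(data_dir.strip(), params)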

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index 31fc2f1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,160 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-def namenode(action=None, do_format=True):
-  import params
-  # this directory must exist before any action (HA manual steps for the
-  # additional namenode)
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if do_format:
-      format_namenode()
-      pass
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-    service(
-      action="start", name="namenode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-    if params.dfs_ha_enabled:
-      dfs_check_nn_status_cmd = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
-    else:
-      dfs_check_nn_status_cmd = None
-
-    namenode_safe_mode_off = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} dfsadmin -safemode get' | grep 'Safe mode is OFF'")
-
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-              user = params.hdfs_user)
-    Execute(namenode_safe_mode_off,
-            tries=40,
-            try_sleep=10,
-            only_if=dfs_check_nn_status_cmd #skip when HA not active
-    )
-    create_hdfs_directories(dfs_check_nn_status_cmd)
-  if action == "stop":
-    service(
-      action="stop", name="namenode", 
-      user=params.hdfs_user
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-
-def create_hdfs_directories(check):
-  import params
-
-  params.HdfsDirectory("/tmp",
-                       action="create_delayed",
-                       owner=params.hdfs_user,
-                       mode=0777
-  )
-  params.HdfsDirectory(params.smoke_hdfs_user_dir,
-                       action="create_delayed",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-  )
-  params.HdfsDirectory(None, action="create",
-                       only_if=check #skip creation when HA not active
-  )
-
-def format_namenode(force=None):
-  import params
-
-  old_mark_dir = params.namenode_formatted_old_mark_dir
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True,
-                    bin_dir=params.hadoop_bin_dir,
-                    conf_dir=hadoop_conf_dir)
-    else:
-      File(format("{tmp_dir}/checkForFormat.sh"),
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
-        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
-              not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
-      )
-    
-      Directory(mark_dir,
-        recursive = True
-      )
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-  user_group = params.user_group
-  dn_kinit_cmd = params.dn_kinit_cmd
-  
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=user_group
-  )
-  
-  Execute(dn_kinit_cmd,
-          user=hdfs_user
-  )
-
-  if params.dfs_ha_enabled:
-    # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-    # need to execute each command scoped to a particular namenode
-    nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
-  else:
-    nn_refresh_cmd = format('dfsadmin -refreshNodes')
-  ExecuteHadoop(nn_refresh_cmd,
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True,
-                bin_dir=params.hadoop_bin_dir)
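The safe-mode wait in namenode() above (Execute with tries=40, try_sleep=10) is a plain poll-until-ready loop. A standalone sketch of the same idea (assumes the hdfs CLI is on PATH; this is not the resource_management retry mechanism itself):

    import subprocess
    import time

    def wait_for_safemode_off(tries=40, try_sleep=10):
        # Poll 'hdfs dfsadmin -safemode get' until it reports OFF.
        for _ in range(tries):
            out = subprocess.check_output(
                ['hdfs', 'dfsadmin', '-safemode', 'get'],
                universal_newlines=True)
            if 'Safe mode is OFF' in out:
                return
            time.sleep(try_sleep)
        raise RuntimeError('NameNode did not leave safe mode')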

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_rebalance.py
deleted file mode 100644
index 1dc545e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_rebalance.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import re
-
-class HdfsParser():
-  def __init__(self):
-    self.initialLine = None
-    self.state = None
-  
-  def parseLine(self, line):
-    hdfsLine = HdfsLine()
-    type, matcher = hdfsLine.recognizeType(line)
-    if(type == HdfsLine.LineType.HeaderStart):
-      self.state = 'PROCESS_STARTED'
-    elif (type == HdfsLine.LineType.Progress):
-      self.state = 'PROGRESS'
-      hdfsLine.parseProgressLog(line, matcher)
-      if(self.initialLine == None): self.initialLine = hdfsLine
-      
-      return hdfsLine 
-    elif (type == HdfsLine.LineType.ProgressEnd):
-      self.state = 'PROCESS_FINISHED'
-    return None
-    
-class HdfsLine():
-  
-  class LineType:
-    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
-  
-  
-  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
-  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
-  
-  HEADER_BEGIN_PATTERN = re.compile(r'Time Stamp\s+Iteration#\s+Bytes Already Moved\s+Bytes Left To Move\s+Bytes Being Moved')
-  PROGRESS_PATTERN = re.compile(
-                            "(?P<date>.*?)\s+" + 
-                            "(?P<iteration>\d+)\s+" + 
-                            MEMORY_PATTERN % (1,1,1) + "\s+" + 
-                            MEMORY_PATTERN % (2,2,2) + "\s+" +
-                            MEMORY_PATTERN % (3,3,3)
-                            )
-  PROGRESS_END_PATTERN = re.compile(r'The cluster is balanced\. Exiting\.\.\.')
-  
-  def __init__(self):
-    self.date = None
-    self.iteration = None
-    self.bytesAlreadyMoved = None 
-    self.bytesLeftToMove = None
-    self.bytesBeingMoved = None 
-    self.bytesAlreadyMovedStr = None 
-    self.bytesLeftToMoveStr = None
-    self.bytesBeingMovedStr = None 
-  
-  def recognizeType(self, line):
-    for (type, pattern) in (
-                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
-                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN), 
-                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
-                            ):
-      m = re.match(pattern, line)
-      if m:
-        return type, m
-    return HdfsLine.LineType.Unknown, None
-    
-  def parseProgressLog(self, line, m):
-    '''
-    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
-    
-    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-    
-    Throws AmbariException in case of parsing errors
-
-    '''
-    m = re.match(self.PROGRESS_PATTERN, line)
-    if m:
-      self.date = m.group('date') 
-      self.iteration = int(m.group('iteration'))
-       
-      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1')) 
-      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2')) 
-      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
-       
-      self.bytesAlreadyMovedStr = m.group('memmult_1') 
-      self.bytesLeftToMoveStr = m.group('memmult_2')
-      self.bytesBeingMovedStr = m.group('memmult_3') 
-    else:
-      raise AmbariException("Failed to parse line [%s]") 
-  
-  def parseMemory(self, memorySize, multiplier_type):
-    try:
-      factor = self.MEMORY_SUFFIX.index(multiplier_type)
-    except ValueError:
-      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
-    
-    return float(memorySize) * (1024 ** factor)
-  def toJson(self):
-    return {
-            'timeStamp' : self.date,
-            'iteration' : self.iteration,
-            
-            'dataMoved': self.bytesAlreadyMovedStr,
-            'dataLeft' : self.bytesLeftToMoveStr,
-            'dataBeingMoved': self.bytesBeingMovedStr,
-            
-            'bytesMoved': self.bytesAlreadyMoved,
-            'bytesLeft' : self.bytesLeftToMove,
-            'bytesBeingMoved': self.bytesBeingMoved,
-          }
-  def __str__(self):
-    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file
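A quick usage sketch for the parser above, fed the header plus one progress line from the sample balancer.log (assumes hdfs_rebalance is importable from the working directory):

    import hdfs_rebalance

    parser = hdfs_rebalance.HdfsParser()
    parser.parseLine('Time Stamp               Iteration#  Bytes Already Moved  '
                     'Bytes Left To Move  Bytes Being Moved')
    pl = parser.parseLine('Jul 28, 2014 5:01:49 PM           0                  '
                          '0 B             5.74 GB            9.79 GB')
    print(pl.toJson())  # {'iteration': 0, 'dataLeft': '5.74 GB', ...}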

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index 49241b4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group)
-  elif action == "start" or action == "stop":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index de18c88..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from hdfs import hdfs
-
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              recursive=True,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    env.set_params(params)
-    hdfs()
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-
-if __name__ == "__main__":
-  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index a0b07aa..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,134 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_namenode import namenode
-from hdfs import hdfs
-import time
-import json
-import subprocess
-import hdfs_rebalance
-import sys
-import os
-from datetime import datetime
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-    #TODO we need this for HA because of manual steps
-    self.configure(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    namenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    namenode(action="configure")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-  
-    
-  def rebalancehdfs(self, env):
-    import params
-    env.set_params(params)
-
-    name_node_parameters = json.loads( params.name_node_params )
-    threshold = name_node_parameters['threshold']
-    _print("Starting balancer with threshold = %s\n" % threshold)
-    
-    def calculateCompletePercent(first, current):
-      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
-    
-    
-    def startRebalancingProcess(threshold):
-      rebalanceCommand = format('export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
-      return ['su','-',params.hdfs_user,'-c', rebalanceCommand]
-    
-    command = startRebalancingProcess(threshold)
-    
-    basedir = os.path.join(env.config.basedir, 'scripts')
-    if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
-      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
-      command = ['python','hdfs-command.py']
-    
-    _print("Executing command %s\n" % command)
-    
-    parser = hdfs_rebalance.HdfsParser()
-    proc = subprocess.Popen(
-                            command, 
-                            stdout=subprocess.PIPE, 
-                            shell=False,
-                            close_fds=True,
-                            cwd=basedir
-                           )
-    for line in iter(proc.stdout.readline, ''):
-      _print('[balancer] %s %s' % (str(datetime.now()), line ))
-      pl = parser.parseLine(line)
-      if pl:
-        res = pl.toJson()
-        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl) 
-        
-        self.put_structured_out(res)
-      elif parser.state == 'PROCESS_FINISED':  # (sic) state name as defined by hdfs_rebalance.HdfsParser
-        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
-        self.put_structured_out({'completePercent' : 1})
-        break
-    
-    proc.stdout.close()
-    proc.wait()
-    if proc.returncode is not None and proc.returncode != 0:
-      raise Fail('Hdfs rebalance process exited with error. See the log output')
-      
-def _print(line):
-  sys.stdout.write(line)
-  sys.stdout.flush()
-
-if __name__ == "__main__":
-  NameNode().execute()
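
For reference, the pattern rebalancehdfs relies on — streaming a child process's stdout line by line and reporting a completion fraction relative to the first progress line — can be sketched standalone. This is a minimal sketch: the "bytesLeftToMove=<N>" line format below is a hypothetical stand-in for the balancer columns that hdfs_rebalance.HdfsParser actually extracts.

import re
import subprocess

PROGRESS_RE = re.compile(r'bytesLeftToMove=(\d+)')  # hypothetical simplified format

def stream_progress(command):
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, close_fds=True)
    first = None
    for raw in iter(proc.stdout.readline, b''):
        m = PROGRESS_RE.search(raw.decode('utf-8', 'replace'))
        if not m:
            continue
        left = float(m.group(1))
        if first is None:
            first = left or 1.0  # remember the initial backlog
        yield 1.0 - left / first  # fraction of the initial backlog already moved
    proc.stdout.close()
    proc.wait()

if __name__ == '__main__':
    # e.g. replay a captured balancer log through the parser
    for pct in stream_progress(['cat', 'balancer.log']):
        print('completePercent=%.2f' % pct)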

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 8c2dacb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,235 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  mapreduce_libs_path = "/usr/phd/current/hadoop-mapreduce-client/*"
-  hadoop_libexec_dir = "/usr/phd/current/hadoop-client/libexec"
-  hadoop_bin = "/usr/phd/current/hadoop-client/sbin"
-  hadoop_bin_dir = "/usr/phd/current/hadoop-client/bin"
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_bin_dir = "/usr/bin"
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-limits_conf_dir = "/etc/security/limits.d"
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
-ulimit_cmd = "ulimit -c unlimited; "
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-falcon_user = config['configurations']['falcon-env']['falcon_user']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
-
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = len(ganglia_server_hosts) > 0
-has_namenodes = len(namenode_host) > 0
-has_jobtracker = len(jtnode_host) > 0
-has_resourcemanager = len(rm_host) > 0
-has_historyserver = len(hs_host) > 0
-has_hbase_masters = len(hbase_master_hosts) > 0
-has_slaves = len(slave_hosts) > 0
-has_nagios = len(nagios_server_hosts) > 0
-has_oozie_server = len(oozie_servers) > 0
-has_hcat_server_host = len(hcat_server_hosts) > 0
-has_hive_server_host = len(hive_server_host) > 0
-has_journalnode_hosts = len(journalnode_hosts) > 0
-has_zkfc_hosts = len(zkfc_hosts) > 0
-has_falcon_host = len(falcon_host) > 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-nagios_user = config['configurations']['nagios-env']['nagios_user']
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-webhcat_user = config['configurations']['hive-env']['hcat_user']
-hcat_user = config['configurations']['hive-env']['hcat_user']
-hive_user = config['configurations']['hive-env']['hive_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = status_params.hdfs_user
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
-
-#hadoop params
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
-
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-namenode_formatted_old_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
-
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
-
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
-data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
-
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-
-namenode_id = None
-namenode_rpc = None
-
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namenode_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-      namenode_rpc = nn_host
-
-journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-
-if security_enabled:
-  _dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-  _dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-  _dn_principal_name = _dn_principal_name.replace('_HOST',hostname.lower())
-  
-  dn_kinit_cmd = format("{kinit_path_local} -kt {_dn_keytab} {_dn_principal_name};")
-else:
-  dn_kinit_cmd = ""
-
-import functools
-#create partial functions with common arguments for every HdfsDirectory call;
-#code that needs to create an HDFS directory calls params.HdfsDirectory
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
-
-io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
-if not "com.hadoop.compression.lzo" in io_compression_codecs:
-  exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
-else:
-  exclude_packages = []
-name_node_params = default("/commandParams/namenode", None)
-
-#hadoop params
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-stack_version = str(config['hostLevelParams']['stack_version'])
-
-stack_is_champlain_or_further = not (stack_version.startswith('2.0') or stack_version.startswith('2.1'))
-
-if stack_version.startswith('2.0') and System.get_instance().os_family != "suse":
-  # deprecated rhel jsvc_path
-  jsvc_path = "/usr/libexec/phd-utils"
-else:
-  jsvc_path = "/usr/lib/phd-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
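
The HA block above (dfs.nameservices / dfs.ha.namenodes.* / dfs.namenode.rpc-address.*) resolves which namenode id belongs to the current host. A minimal standalone sketch of that lookup, using a plain dict in place of Ambari's config object (the property values and hostnames are made-up examples):

# hdfs-site style properties, hypothetical values
hdfs_site = {
    'dfs.nameservices': 'ns1',
    'dfs.ha.namenodes.ns1': 'nn1,nn2',
    'dfs.namenode.rpc-address.ns1.nn1': 'master1.example.com:8020',
    'dfs.namenode.rpc-address.ns1.nn2': 'master2.example.com:8020',
}

def resolve_namenode_id(site, hostname):
    ns = site.get('dfs.nameservices')
    ids = [i.strip() for i in site.get('dfs.ha.namenodes.%s' % ns, '').split(',') if i.strip()]
    if len(ids) < 2:
        return None  # HA not enabled
    for nn_id in ids:
        rpc = site['dfs.namenode.rpc-address.%s.%s' % (ns, nn_id)]
        if hostname in rpc:
            return nn_id  # this host serves as that namenode
    return None

print(resolve_namenode_id(hdfs_site, 'master2.example.com'))  # -> nn2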

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index 36e26d6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = functions.get_unique_id_and_date()
-    check_dir = '/tmp'  # renamed from 'dir' to avoid shadowing the builtin
-    tmp_file = format("{check_dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {check_dir}")
-    chmod_command = format("fs -chmod 777 {check_dir}")
-    test_dir_exists = format("su -s /bin/bash - {smoke_user} -c '{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {check_dir}'")
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    #cleanup is put below to handle retries; if retrying, there will be a stale file
-    #that needs cleanup; the exit code is that of the second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format(
-        "su -s /bin/bash - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=20,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(chmod_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su -s /bin/bash - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName))
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = format(
-          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()
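
The safemode step above waits (tries=20, try_sleep=3) until "dfsadmin -safemode get" reports OFF. A minimal plain-Python sketch of that retry loop, assuming the hadoop CLI is on PATH and run outside Ambari's ExecuteHadoop wrapper:

import subprocess
import time

def wait_for_safemode_off(tries=20, try_sleep=3):
    # Poll the namenode until it leaves safemode or we run out of attempts
    for _ in range(tries):
        out = subprocess.run(['hadoop', 'dfsadmin', '-safemode', 'get'],
                             capture_output=True, text=True).stdout
        if 'OFF' in out:
            return True
        time.sleep(try_sleep)
    return False

if __name__ == '__main__':
    print('safemode off: %s' % wait_for_safemode_off())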

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 5eb25d2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-from hdfs import hdfs
-
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env, params.exclude_packages)
-
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.configure(env)
-    snamenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 1dd4750..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-phd_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{phd_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{phd_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{phd_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{phd_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{phd_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
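
These pid files are what the status commands test against. A rough standalone analogue of that check, mirroring the shell idiom "ls pid_file && ps `cat pid_file`" used elsewhere in these scripts (the path below is only an example following the naming pattern above): the file must exist and the recorded pid must belong to a live process.

import os

def is_process_up(pid_file):
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)  # signal 0: existence/permission check only, nothing is sent
        return True
    except (IOError, OSError, ValueError):
        return False  # missing file, unreadable pid, or dead process

print(is_process_up('/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid'))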

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index 5e1221a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-import re
-
-
-def service(action=None, name=None, user=None, create_pid_dir=False,
-            create_log_dir=False):
-  import params
-
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  check_process = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps `cat {pid_file}` >/dev/null 2>&1")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  hadoop_env_exports = {
-    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
-  }
-
-  if params.security_enabled and name == "datanode":
-    dfs_dn_port = get_port(params.dfs_dn_addr)
-    dfs_dn_http_port = get_port(params.dfs_dn_http_addr)
-    dfs_dn_https_port = get_port(params.dfs_dn_https_addr)
-
-    # Avoid the datanode failing to start as a non-root user when it is bound to root-owned (privileged) ports
-    if params.dfs_http_policy == "HTTPS_ONLY":
-      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-    elif params.dfs_http_policy == "HTTP_AND_HTTPS":
-      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-
-    # Calculate the HADOOP_SECURE_DN_* env vars, but do not append them yet:
-    # they must not be set when starting a secure datanode as a non-root user
-    ## On secure datanodes, user to run the datanode as after dropping privileges
-    hadoop_secure_dn_user = params.hdfs_user
-    ## Where log files are stored in the secure data environment.
-    hadoop_secure_dn_log_dir = format("{hdfs_log_dir_prefix}/{hadoop_secure_dn_user}")
-    ## The directory where pid files are stored in the secure data environment.
-    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hadoop_secure_dn_user}")
-    hadoop_secure_dn_exports = {
-      'HADOOP_SECURE_DN_USER' : hadoop_secure_dn_user,
-      'HADOOP_SECURE_DN_LOG_DIR' : hadoop_secure_dn_log_dir,
-      'HADOOP_SECURE_DN_PID_DIR' : hadoop_secure_dn_pid_dir
-    }
-    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
-
-    # On the Champlain stack and later, the datanode may be started as a non-root user even in a secure cluster
-    if not params.stack_is_champlain_or_further or secure_ports_are_in_use:
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-      if params.stack_is_champlain_or_further:
-        hadoop_env_exports.update(hadoop_secure_dn_exports)
-
-    if action == 'stop' and params.stack_is_champlain_or_further and \
-      os.path.isfile(hadoop_secure_dn_pid_file):
-        # Special handling: when a non-root secure DN is reconfigured and then
-        # restarted to pick up the new configs, the old instance is still running
-        # as root; without this we would be unable to stop it
-        user = "root"
-        try:
-          with open(hadoop_secure_dn_pid_file, 'r') as f:
-            pid = f.read()
-          os.kill(int(pid), 0)
-          hadoop_env_exports.update(hadoop_secure_dn_exports)
-        except IOError:
-          pass  # Can not open pid file
-        except ValueError:
-          pass  # Pid file content is invalid
-        except OSError:
-          pass  # Process is not running
-
-
-  hadoop_env_exports_str = ''
-  for name, value in hadoop_env_exports.items():
-    hadoop_env_exports_str += "export {0}={1} && ".format(name, value)
-
-  hadoop_daemon = format(
-    "{hadoop_env_exports_str}"
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
-
-  daemon_cmd = format("{ulimit_cmd} su -s /bin/bash - {user} -c '{cmd} {action} {name}'")
-
-  service_is_up = check_process if action == "start" else None
-  #remove pid file from dead process
-  File(pid_file,
-       action="delete",
-       not_if=check_process,
-  )
-  Execute(daemon_cmd,
-          not_if=service_is_up
-  )
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-    )
-
-def get_port(address):
-  """
-  Extracts port from the address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if port is root-owned at *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
\ No newline at end of file
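
Quick usage of the two helpers above (the address strings are illustrative):

print(get_port('0.0.0.0:1019'))                   # -> 1019
print(get_port('https://dn1.example.com:50475'))  # -> 50475 (scheme prefix is allowed)
print(get_port(None))                             # -> None
print(is_secure_port(1019))                       # -> True, root-owned on *nix
print(is_secure_port(50010))                      # -> False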

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index fd9bbfa..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from hdfs import hdfs
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    hdfs()
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index a92cdc1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file
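
To see what this template produces, here is a rendering with stock jinja2 (Ambari renders it through its own Template resource; the host names are made-up):

from jinja2 import Template

tmpl = Template(u"{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}")
print(tmpl.render(hdfs_exclude_file=['dn3.example.com', 'dn7.example.com']))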

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/hdfs.conf.j2
deleted file mode 100644
index d58a6f5..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile 32768
-{{hdfs_user}}   - nproc  65536
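
These limits.d entries raise the open-file and process caps for the hdfs user. A quick way to verify the effective limits from Python (run as the hdfs user):

import resource

print(resource.getrlimit(resource.RLIMIT_NOFILE))  # (soft, hard) open-file limit
print(resource.getrlimit(resource.RLIMIT_NPROC))   # (soft, hard) process/thread limit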

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/slaves.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/templates/slaves.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hcat-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hcat-env.xml
deleted file mode 100644
index 91b402b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hcat-env.xml
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- hcat-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hcat-env.sh file</description>
-    <value>
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements. See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership. The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License. You may obtain a copy of the License at
-      #
-      # http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing, software
-      # distributed under the License is distributed on an "AS IS" BASIS,
-      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      # See the License for the specific language governing permissions and
-      # limitations under the License.
-
-      JAVA_HOME={{java64_home}}
-      HCAT_PID_DIR={{hcat_pid_dir}}/
-      HCAT_LOG_DIR={{hcat_log_dir}}/
-      HCAT_CONF_DIR={{hcat_conf_dir}}
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-      #DBROOT is the path where the connector jars are downloaded
-      DBROOT={{hcat_dbroot}}
-      USER={{hcat_user}}
-      METASTORE_PORT={{hive_metastore_port}}
-    </value>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-env.xml
deleted file mode 100644
index 1b8b75a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-env.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hive_database_type</name>
-    <value>postgres</value>
-    <description>Default HIVE DB type.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value>New PostgreSQL Database</value>
-    <description>
-      Property that determines whether the HIVE DB is managed by Ambari.
-    </description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>PostgreSQL</value>
-    <description>Database type.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value>hive</value>
-    <description>Database name.</description>
-  </property>
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <property-type>USER</property-type>
-    <description>Hive User.</description>
-  </property>
-
-  <!--HCAT-->
-
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <property-type>USER</property-type>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <property-type>USER</property-type>
-    <description>WebHCat User.</description>
-  </property>
-  
-  <!-- hive-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hive-env.sh file</description>
-    <value>
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the JVM started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hive_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# A larger heap size may be required when running queries over a large number of files or partitions.
-# By default the hive shell scripts use a heap size of 256 MB.  A larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{hive_config_dir}}
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-elif [ -d "/usr/lib/hive-hcatalog/" ]; then
-  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
-else
-  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
-fi
-export METASTORE_PORT={{hive_metastore_port}}
-    </value>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-exec-log4j.xml
deleted file mode 100644
index fb852f7..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-exec-log4j.xml
+++ /dev/null
@@ -1,111 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom hive-exec-log4j</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-
-hive.log.threshold=ALL
-hive.root.logger=INFO,FA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.query.id=hadoop
-hive.log.file=${hive.query.id}.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=${hive.log.threshold}
-
-#
-# File Appender
-#
-
-log4j.appender.FA=org.apache.log4j.FileAppender
-log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
-log4j.appender.FA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,FA
-log4j.category.Datastore=ERROR,FA
-log4j.category.Datastore.Schema=ERROR,FA
-log4j.category.JPOX.Datastore=ERROR,FA
-log4j.category.JPOX.Plugin=ERROR,FA
-log4j.category.JPOX.MetaData=ERROR,FA
-log4j.category.JPOX.Query=ERROR,FA
-log4j.category.JPOX.General=ERROR,FA
-log4j.category.JPOX.Enhancer=ERROR,FA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA
-
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-log4j.xml
deleted file mode 100644
index cb8d3a9..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-log4j.xml
+++ /dev/null
@@ -1,120 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.log.threshold=ALL
-hive.root.logger=INFO,DRFA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.log.file=hive.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=${hive.log.threshold}
-
-#
-# Daily Rolling File Appender
-#
-# Use the PidDailyRollingFileAppender class instead if you want to use separate log files
-# for different CLI sessions.
-#
-# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.console.encoding=UTF-8
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
-    </value>
-  </property>
-
-</configuration>


[17/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
deleted file mode 100644
index 08822eb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,201 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-# Define some default values that can be overridden by system properties
-# To change daemon root logger use hadoop_root_logger in hadoop-env
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and number of rolled backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-    </value>
-  </property>
-
-</configuration>
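
The "content" property above stores an entire log4j.properties file as a
single XML value. A minimal sketch, using only the Python standard library,
of how such a template could be pulled out of the configuration XML and
written to disk (the function name and paths are illustrative, not the
stack's actual deployment code):

    import xml.etree.ElementTree as ET

    def materialize_log4j(config_xml_path, out_path):
        """Extract the 'content' property and write it out as log4j.properties."""
        root = ET.parse(config_xml_path).getroot()
        for prop in root.findall('property'):
            if prop.findtext('name') == 'content':
                with open(out_path, 'w') as f:
                    f.write(prop.findtext('value').strip() + '\n')
                return True
        return False

    # materialize_log4j('hdfs-log4j.xml', '/etc/hadoop/conf/log4j.properties')

In the stack scripts themselves this templating goes through Ambari's
resource_management library; the sketch only shows the shape of the data.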

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 6499868..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,430 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true">
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      The default value is the same as dfs.namenode.checkpoint.dir.
-    </description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.txns</name>
-    <value>1000000</value>
-    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
-      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
-      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in term of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50475</value>
-    <description>
-      The datanode https server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port on which the dfs NameNode
-      web UI will listen.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>The number of server threads for the namenode, raised so that
-      more client connections are allowed.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be there on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for the users and groups that can view the default servlets
-      in HDFS. The leading space in the value means no users, group "hdfs".</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes, so as to prevent hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A datanode is marked stale if no heartbeat has been received
-      from it within this interval, in milliseconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/grid/0/hdfs/journal</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-  </property>
-
-  <!-- HDFS Short-Circuit Local Reads -->
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>
-      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
-      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      Decide if HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
-      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
-      (service is provided only on https), and HTTP_AND_HTTPS (service is provided on both).
-    </description>
-  </property>
-
-</configuration>
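
A note on files like the one above: Hadoop's Configuration loads a
*-site.xml resource top to bottom, so if a property name appears twice the
later, non-final definition silently wins. A small sketch of that last-wins
loading, stdlib only (the function name is illustrative):

    import xml.etree.ElementTree as ET

    def load_site_xml(path):
        """Flatten a *-site.xml file into a name -> value dict (last wins)."""
        props = {}
        for prop in ET.parse(path).getroot().findall('property'):
            name = prop.findtext('name')
            if name:
                # Do not strip values: leading whitespace can be significant,
                # e.g. the " hdfs" value of dfs.cluster.administrators.
                props[name] = prop.findtext('value') or ''
        return props

    # conf = load_site_xml('hdfs-site.xml')
    # conf.get('dfs.namenode.handler.count')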

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml
deleted file mode 100644
index 14063eb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,226 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <displayName>HDFS</displayName>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.4.1.phd.3.0.0.0</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <displayName>NameNode</displayName>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REBALANCEHDFS</name>
-              <background>true</background>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <displayName>DataNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <displayName>SNameNode</displayName>
-          
-          <cardinality>1</cardinality>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <displayName>HDFS Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hdfs-site.xml</fileName>
-              <dictionaryName>hdfs-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>                          
-            <configFile>
-              <type>env</type>
-              <fileName>hadoop-env.sh</fileName>
-              <dictionaryName>hadoop-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-
-        <component>
-          <name>JOURNALNODE</name>
-          <displayName>JournalNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/journalnode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <displayName>ZKFailoverController</displayName>
-          <category>SLAVE</category>
-          
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/zkfc_slave.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <name>hadoop-lzo</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>snappy</name>
-            </package>
-            <package>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <name>lzo</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>hadoop-lzo-native</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>hadoop-libhdfs</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>libsnappy1</name>
-            </package>
-            <package>
-              <name>libsnappy-dev</name>
-            </package>
-            <package>
-              <name>liblzo2-2</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>hadoop-hdfs</name>
-            </package>
-            <package>
-              <name>libhdfs0</name>
-            </package>
-            <package>
-              <name>libhdfs0-dev</name>
-            </package>
-          </packages>
-        </osSpecific>
-            
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
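
The metainfo.xml above is what tells Ambari which components a service has,
how many of each may be deployed (cardinality), and which script implements
each lifecycle command. A minimal sketch of reading that structure, stdlib
only (function name and output format are illustrative):

    import xml.etree.ElementTree as ET

    def list_components(metainfo_path):
        """Yield (name, category, cardinality, script) for each component."""
        root = ET.parse(metainfo_path).getroot()
        for comp in root.iter('component'):
            script = comp.find('commandScript/script')
            yield (comp.findtext('name'),
                   comp.findtext('category'),
                   comp.findtext('cardinality'),
                   script.text if script is not None else None)

    # for row in list_components('metainfo.xml'):
    #     print(row)  # e.g. ('NAMENODE', 'MASTER', '1-2', 'scripts/namenode.py')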


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json
deleted file mode 100644
index ca02f99..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json
+++ /dev/null
@@ -1,7860 +0,0 @@
-{
-  "NAMENODE": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/dfs/FSNamesystem/TotalLoad": {
-              "metric": "dfs.FSNamesystem.TotalLoad",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotal": {
-              "metric": "dfs.FSNamesystem.CapacityTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsed": {
-              "metric": "dfs.FSNamesystem.CapacityUsed",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemaining": {
-              "metric": "dfs.FSNamesystem.CapacityRemaining",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-              "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/BlockCapacity": {
-              "metric": "dfs.FSNamesystem.BlockCapacity",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetListingOps": {
-              "metric": "dfs.namenode.GetListingOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesAppended": {
-              "metric": "dfs.namenode.FilesAppended",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/fsync_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/renewLease_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getFileInfo_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/complete_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setPermission_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-              "metric": "dfs.FSNamesystem.CapacityTotalGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setOwner_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocations_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-              "metric": "dfs.FSNamesystem.CapacityUsedGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/AddBlockOps": {
-              "metric": "dfs.namenode.AddBlockOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesDeleted": {
-              "metric": "dfs.namenode.FilesDeleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Syncs_avg_time": {
-              "metric": "dfs.namenode.SyncsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/blockReport_avg_time": {
-              "metric": "dfs.namenode.BlockReportAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getFileInfo_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getEditLogSize_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReceived_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/versionRequest_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/versionRequest_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/addBlock_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesCreated": {
-              "metric": "dfs.namenode.FilesCreated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rename_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setSafeMode_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setPermission_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesRenamed": {
-              "metric": "dfs.namenode.FilesRenamed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/register_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setReplication_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-              "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetBlockLocations": {
-              "metric": "dfs.namenode.GetBlockLocations",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/fsync_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/create_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-              "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/delete_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FileInfoOps": {
-              "metric": "dfs.namenode.FileInfoOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/DeleteFileOps": {
-              "metric": "dfs.namenode.DeleteFileOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReport_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setSafeMode_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-              "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getEditLogSize_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesInGetListingOps": {
-              "metric": "dfs.namenode.FilesInGetListingOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/complete_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollFsImage_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Syncs_num_ops": {
-              "metric": "dfs.namenode.SyncsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReceived_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setReplication_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollEditLog_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/FilesTotal": {
-              "metric": "dfs.FSNamesystem.FilesTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/ExcessBlocks": {
-              "metric": "dfs.FSNamesystem.ExcessBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/blockReport_num_ops": {
-              "metric": "dfs.namenode.BlockReportNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/SafemodeTime": {
-              "metric": "dfs.namenode.SafemodeTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollFsImage_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/mkdirs_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-              "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/BlocksTotal": {
-              "metric": "dfs.FSNamesystem.BlocksTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocations_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Transactions_num_ops": {
-              "metric": "dfs.namenode.TransactionsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/create_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-              "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Transactions_avg_time": {
-              "metric": "dfs.namenode.TransactionsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/MissingBlocks": {
-              "metric": "dfs.FSNamesystem.MissingBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/delete_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CorruptBlocks": {
-              "metric": "dfs.FSNamesystem.CorruptBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rename_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReport_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/mkdirs_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/fsImageLoadTime": {
-              "metric": "dfs.namenode.FsImageLoadTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getListing_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollEditLog_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/addBlock_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setOwner_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-              "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/CreateFileOps": {
-              "metric": "dfs.namenode.CreateFileOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/register_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getListing_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/renewLease_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/dfs/namenode/Used": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/TotalLoad": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memMaxM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/BlockCapacity": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/TotalFiles": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/HostName": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/GetListingOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/UpgradeFinalized": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/fsync_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/Safemode": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CorruptBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/LiveNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/renewLease_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getFileInfo_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemaining": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/PercentRemaining": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/complete_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getBlockLocations_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/AddBlockOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Syncs_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/PercentUsed": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/DecomNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/blockReport_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/NonDfsUsedSpace": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/UpgradeFinalized": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getFileInfo_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getEditLogSize_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReceived_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Safemode": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/FilesCreated": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/addBlock_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/DecomNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsed": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/DeadNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/PercentUsed": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Free": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Total": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/GetBlockLocations": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/fsync_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/create_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/UnderReplicatedBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/FileInfoOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/MissingBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReport_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CapacityRemaining": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getEditLogSize_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/FilesInGetListingOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/BlocksTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/complete_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/LiveNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollFsImage_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Syncs_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReceived_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollEditLog_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/DeadNodes": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/FilesTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Version": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/ExcessBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/PercentRemaining": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/blockReport_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollFsImage_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/BlocksTotal": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getBlockLocations_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Transactions_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/create_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CapacityTotal": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Transactions_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/MissingBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/Threads": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CorruptBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/blockReport_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/TotalFiles": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/NameDirStatuses": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getListing_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/rollEditLog_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/addBlock_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/CapacityUsed": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/CreateFileOps": {
-              "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logError": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/Version": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/getListing_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/NonDfsUsedSpace": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpcdetailed/renewLease_avg_time": {
-              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/namenode/TotalBlocks": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/dfs/FSNamesystem/TotalLoad": {
-              "metric": "dfs.FSNamesystem.TotalLoad",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotal": {
-              "metric": "dfs.FSNamesystem.CapacityTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsed": {
-              "metric": "dfs.FSNamesystem.CapacityUsed",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemaining": {
-              "metric": "dfs.FSNamesystem.CapacityRemaining",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-              "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/BlockCapacity": {
-              "metric": "dfs.FSNamesystem.BlockCapacity",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetListingOps": {
-              "metric": "dfs.namenode.GetListingOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesAppended": {
-              "metric": "dfs.namenode.FilesAppended",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/fsync_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/renewLease_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getFileInfo_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/complete_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setPermission_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-              "metric": "dfs.FSNamesystem.CapacityTotalGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setOwner_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocations_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-              "metric": "dfs.FSNamesystem.CapacityUsedGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/AddBlockOps": {
-              "metric": "dfs.namenode.AddBlockOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesDeleted": {
-              "metric": "dfs.namenode.FilesDeleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Syncs_avg_time": {
-              "metric": "dfs.namenode.SyncsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/blockReport_avg_time": {
-              "metric": "dfs.namenode.BlockReportAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getFileInfo_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getEditLogSize_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReceived_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/versionRequest_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/versionRequest_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/addBlock_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesCreated": {
-              "metric": "dfs.namenode.FilesCreated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rename_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setSafeMode_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setPermission_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesRenamed": {
-              "metric": "dfs.namenode.FilesRenamed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/register_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setReplication_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-              "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/GetBlockLocations": {
-              "metric": "dfs.namenode.GetBlockLocations",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/fsync_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/create_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-              "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/delete_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FileInfoOps": {
-              "metric": "dfs.namenode.FileInfoOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/DeleteFileOps": {
-              "metric": "dfs.namenode.DeleteFileOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReport_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setSafeMode_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-              "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getEditLogSize_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/FilesInGetListingOps": {
-              "metric": "dfs.namenode.FilesInGetListingOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/complete_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollFsImage_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Syncs_num_ops": {
-              "metric": "dfs.namenode.SyncsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReceived_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setReplication_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollEditLog_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/FilesTotal": {
-              "metric": "dfs.FSNamesystem.FilesTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/ExcessBlocks": {
-              "metric": "dfs.FSNamesystem.ExcessBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/blockReport_num_ops": {
-              "metric": "dfs.namenode.BlockReportNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/SafemodeTime": {
-              "metric": "dfs.namenode.SafemodeTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollFsImage_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/mkdirs_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-              "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/BlocksTotal": {
-              "metric": "dfs.FSNamesystem.BlocksTotal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocations_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Transactions_num_ops": {
-              "metric": "dfs.namenode.TransactionsNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/create_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-              "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/Transactions_avg_time": {
-              "metric": "dfs.namenode.TransactionsAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/MissingBlocks": {
-              "metric": "dfs.FSNamesystem.MissingBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/delete_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/CorruptBlocks": {
-              "metric": "dfs.FSNamesystem.CorruptBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rename_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blockReport_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/mkdirs_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/fsImageLoadTime": {
-              "metric": "dfs.namenode.FsImageLoadTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getListing_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rollEditLog_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/addBlock_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/setOwner_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-              "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/dfs/namenode/CreateFileOps": {
-              "metric": "dfs.namenode.CreateFileOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/register_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getListing_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/renewLease_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/dfs/namenode/Used": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/dfs/FSNamesystem/TotalLoad": {
-              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-              "pointInTime": true,
-      

<TRUNCATED>
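
For context on the files being removed: each entry in these metrics.json
definitions maps an Ambari metric path (the JSON key, e.g.
"metrics/jvm/gcTimeMillis") to the backing source metric name, plus two
capability flags: "pointInTime" (the value can be read on demand, as with a
JMX bean) and "temporal" (the value can be queried over a time range, as with
Ganglia). A minimal sketch of reading a file with this shape follows; the
path and the helper name load_metric_defs are illustrative only, not Ambari
API.

import json

def load_metric_defs(path):
    """Return {component: {source_type: {metric_path: definition}}}."""
    with open(path) as f:
        doc = json.load(f)
    defs = {}
    for component, body in doc.items():
        per_source = {}
        # "Component" holds one block per source ("ganglia", "jmx", ...);
        # each block keeps its metric definitions under metrics -> default.
        for source in body.get("Component", []):
            per_source[source["type"]] = source.get("metrics", {}).get("default", {})
        defs[component] = per_source
    return defs

# Usage (hypothetical local copy of the deleted file):
#   defs = load_metric_defs("metrics.json")
#   defs["HBASE_REGIONSERVER"]["ganglia"]["metrics/jvm/gcTimeMillis"]["metric"]
#   -> "jvm.JvmMetrics.GcTimeMillis"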

[19/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json
deleted file mode 100644
index 810f3b5..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json
+++ /dev/null
@@ -1,13655 +0,0 @@
-{
-  "HBASE_REGIONSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/hbase/regionserver/compactionTime_avg_time": {
-              "metric": "hbase.regionserver.compactionTime_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/closeRegion_num_ops": {
-              "metric": "rpc.rpc.closeRegion_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-              "metric": "regionserver.Server.mutationsWithoutWALSize",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/unassign_num_ops": {
-              "metric": "rpc.rpc.unassign_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/modifyTable_num_ops": {
-              "metric": "rpc.rpc.modifyTable_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolVersion_avg_time": {
-              "metric": "rpc.rpc.getProtocolVersion_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getClosestRowBefore_num_ops": {
-              "metric": "rpc.rpc.getClosestRowBefore_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/slowAppendCount": {
-              "metric": "regionserver.Server.slowAppendCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/lockRow_num_ops": {
-              "metric": "rpc.rpc.lockRow_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/flushRegion_avg_time": {
-              "metric": "rpc.rpc.flushRegion_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/stopMaster_num_ops": {
-              "metric": "rpc.rpc.stopMaster_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/balance_avg_time": {
-              "metric": "rpc.rpc.balance_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/modifyColumn_avg_time": {
-              "metric": "rpc.rpc.modifyColumn_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/multi/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/rootIndexSizeKB": {
-              "metric": "hbase.regionserver.rootIndexSizeKB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getZooKeeper_num_ops": {
-              "metric": "rpc.rpc.getZooKeeper_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheCount": {
-              "metric": "regionserver.Server.blockCacheCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/flushRegion_num_ops": {
-              "metric": "rpc.rpc.flushRegion_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_std_dev": {
-              "metric": "hbase.regionserver.putRequestLatency_std_dev",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_std_dev": {
-              "metric": "hbase.regionserver.getRequestLatency_std_dev",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/get_num_ops": {
-              "metric": "rpc.rpc.get_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/stopMaster_avg_time": {
-              "metric": "rpc.rpc.stopMaster_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/removeFromOnlineRegions_num_ops": {
-              "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/ping_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getRegionInfo_avg_time": {
-              "metric": "rpc.rpc.getRegionInfo_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/lockRow_avg_time": {
-              "metric": "rpc.rpc.lockRow_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/commitPending_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/checkOOME_num_ops": {
-              "metric": "rpc.rpc.checkOOME_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/reportRSFatalError_num_ops": {
-              "metric": "rpc.rpc.reportRSFatalError_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/reportRSFatalError_avg_time": {
-              "metric": "rpc.rpc.reportRSFatalError_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_min": {
-              "metric": "regionserver.Server.Delete_min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getClusterStatus_num_ops": {
-              "metric": "rpc.rpc.getClusterStatus_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getHTableDescriptors_avg_time": {
-              "metric": "rpc.rpc.getHTableDescriptors_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.rpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/deleteColumn_num_ops": {
-              "metric": "rpc.rpc.deleteColumn_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/delete/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/increment_num_ops": {
-              "metric": "rpc.rpc.increment_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/stop/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/modifyColumn_num_ops": {
-              "metric": "rpc.rpc.modifyColumn_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/checkOOME_avg_time": {
-              "metric": "rpc.rpc.checkOOME_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/next/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.next.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcSlowResponse_avg_time": {
-              "metric": "rpc.rpc.RpcSlowResponse_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getConfiguration_avg_time": {
-              "metric": "rpc.rpc.getConfiguration_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/unassign_avg_time": {
-              "metric": "rpc.rpc.unassign_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/delete/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/canCommit_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/multi/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-              "metric": "regionserver.Server.Delete_num_ops",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/compactRegion_avg_time": {
-              "metric": "rpc.rpc.compactRegion_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/writeRequestsCount": {
-              "metric": "regionserver.Server.writeRequestCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/execCoprocessor_num_ops": {
-              "metric": "rpc.rpc.execCoprocessor_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/canCommit_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_min": {
-              "metric": "regionserver.Server.Get_min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/incrementColumnValue_avg_time": {
-              "metric": "rpc.rpc.incrementColumnValue_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/deleteTable_num_ops": {
-              "metric": "rpc.rpc.deleteTable_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
-              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
-              "metric": "regionserver.Server.Mutate_75th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheHitCount": {
-              "metric": "regionserver.Server.blockCacheHitCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/exists_avg_time": {
-              "metric": "rpc.rpc.exists_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/slowPutCount": {
-              "metric": "regionserver.Server.slowPutCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatency_num_ops": {
-              "metric": "hbase.regionserver.fsWriteLatency_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/exists/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/delete_num_ops": {
-              "metric": "rpc.rpc.delete_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/exists_num_ops": {
-              "metric": "rpc.rpc.exists_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/regionServerStartup_avg_time": {
-              "metric": "rpc.rpc.regionServerStartup_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndDelete_num_ops": {
-              "metric": "rpc.rpc.checkAndDelete_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/closeRegion_avg_time": {
-              "metric": "rpc.rpc.closeRegion_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolSignature_avg_time": {
-              "metric": "rpc.rpc.getProtocolSignature_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/assign_avg_time": {
-              "metric": "rpc.rpc.assign_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/compactionSize_num_ops": {
-              "metric": "hbase.regionserver.compactionSize_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/close_avg_time": {
-              "metric": "rpc.rpc.close_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheSize": {
-              "metric": "regionserver.Server.blockCacheSize",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-              "metric": "regionserver.Server.Mutate_num_ops",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getHServerInfo_num_ops": {
-              "metric": "rpc.rpc.getHServerInfo_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/stop_avg_time": {
-              "metric": "rpc.rpc.stop_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isStopped_num_ops": {
-              "metric": "rpc.rpc.isStopped_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_median": {
-              "metric": "regionserver.Server.Mutate_median",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_median": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_median",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isMasterRunning_avg_time": {
-              "metric": "rpc.rpc.isMasterRunning_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/incrementColumnValue_num_ops": {
-              "metric": "rpc.rpc.incrementColumnValue_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_std_dev": {
-              "metric": "hbase.regionserver.deleteRequestLatency_std_dev",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/hdfsBlocksLocalityIndex": {
-              "metric": "hbase.regionserver.hdfsBlocksLocalityIndex",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/readRequestsCount": {
-              "metric": "regionserver.Server.readRequestCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_min": {
-              "metric": "regionserver.Server.Mutate_min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/storefileIndexSizeMB": {
-              "metric": "regionserver.Server.storeFileIndexSize",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/assign_num_ops": {
-              "metric": "rpc.rpc.assign_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/close/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.close.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_median": {
-              "metric": "regionserver.Server.Delete_median",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/enableTable_avg_time": {
-              "metric": "rpc.rpc.enableTable_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_mean": {
-              "metric": "regionserver.Server.Mutate_mean",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/close_num_ops": {
-              "metric": "rpc.rpc.close_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/done_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.done_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/compactionSize_avg_time": {
-              "metric": "hbase.regionserver.compactionSize_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getFromOnlineRegions_avg_time": {
-              "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_min": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_min",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/increment/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/deleteTable_avg_time": {
-              "metric": "rpc.rpc.deleteTable_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/put/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.put.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/delete_avg_time": {
-              "metric": "rpc.rpc.delete_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/statusUpdate_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.rpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getClusterStatus_avg_time": {
-              "metric": "rpc.rpc.getClusterStatus_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/removeFromOnlineRegions_avg_time": {
-              "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/put/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.put.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/modifyTable_avg_time": {
-              "metric": "rpc.rpc.modifyTable_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndPut_avg_time": {
-              "metric": "rpc.rpc.checkAndPut_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/put_avg_time": {
-              "metric": "rpc.rpc.put_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheHitRatio": {
-              "metric": "hbase.regionserver.blockCacheHitRatio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/createTable_avg_time": {
-              "metric": "rpc.rpc.createTable_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_std_dev",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getHTableDescriptors_num_ops": {
-              "metric": "rpc.rpc.getHTableDescriptors_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getAlterStatus_avg_time": {
-              "metric": "rpc.rpc.getAlterStatus_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getRegionInfo_num_ops": {
-              "metric": "rpc.rpc.getRegionInfo_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/statusUpdate_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/compactRegion_num_ops": {
-              "metric": "rpc.rpc.compactRegion_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isAborted_num_ops": {
-              "metric": "rpc.rpc.isAborted_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_max": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_max",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheEvictedCount": {
-              "metric": "regionserver.Server.blockCacheEvictionCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/disableTable_num_ops": {
-              "metric": "rpc.rpc.disableTable_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openScanner_num_ops": {
-              "metric": "rpc.rpc.openScanner_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/regionServerReport_num_ops": {
-              "metric": "rpc.rpc.regionServerReport_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openRegions_avg_time": {
-              "metric": "rpc.rpc.openRegions_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/exists/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-              "metric": "regionserver.Server.Mutate_99th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isMasterRunning_num_ops": {
-              "metric": "rpc.rpc.isMasterRunning_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/balanceSwitch_num_ops": {
-              "metric": "rpc.rpc.balanceSwitch_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/offline_num_ops": {
-              "metric": "rpc.rpc.offline_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_max": {
-              "metric": "regionserver.Server.Get_max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/abort_num_ops": {
-              "metric": "rpc.rpc.abort_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheHitCachingRatio": {
-              "metric": "hbase.regionserver.blockCacheHitCachingRatio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rollHLogWriter_num_ops": {
-              "metric": "rpc.rpc.rollHLogWriter_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openRegions_num_ops": {
-              "metric": "rpc.rpc.openRegions_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/splitRegion_avg_time": {
-              "metric": "rpc.rpc.splitRegion_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
-              "metric": "regionserver.Server.Get_99th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_min": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_min",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_std_dev",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-              "metric": "regionserver.Server.Delete_99th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_max": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_max",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getTask_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/replicateLogEntries_num_ops": {
-              "metric": "rpc.rpc.replicateLogEntries_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/multi_avg_time": {
-              "metric": "rpc.rpc.multi_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/slowIncrementCount": {
-              "metric": "regionserver.Server.slowIncrementCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-              "metric": "regionserver.Server.Mutate_95th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/compactionQueueSize": {
-              "metric": "regionserver.Server.compactionQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getCatalogTracker_avg_time": {
-              "metric": "rpc.rpc.getCatalogTracker_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/splitRegion_num_ops": {
-              "metric": "rpc.rpc.splitRegion_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/balance_num_ops": {
-              "metric": "rpc.rpc.balance_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/flushTime_num_ops": {
-              "metric": "hbase.regionserver.flushTime_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/shutdown_num_ops": {
-              "metric": "rpc.rpc.shutdown_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatency_num_ops": {
-              "metric": "hbase.regionserver.fsReadLatency_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-              "metric": "regionserver.Server.Get_75th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getServerName_avg_time": {
-              "metric": "rpc.rpc.getServerName_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/compactionTime_num_ops": {
-              "metric": "hbase.regionserver.compactionTime_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/abort_avg_time": {
-              "metric": "rpc.rpc.abort_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/enableTable_num_ops": {
-              "metric": "rpc.rpc.enableTable_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/stores": {
-              "metric": "regionserver.Server.storeCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/addColumn_avg_time": {
-              "metric": "rpc.rpc.addColumn_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getServerName_num_ops": {
-              "metric": "rpc.rpc.getServerName_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.rpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/disableTable_avg_time": {
-              "metric": "rpc.rpc.disableTable_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/abort/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openRegion_avg_time": {
-              "metric": "rpc.rpc.openRegion_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/regionServerReport_avg_time": {
-              "metric": "rpc.rpc.regionServerReport_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getAlterStatus_num_ops": {
-              "metric": "rpc.rpc.getAlterStatus_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/next_avg_time": {
-              "metric": "rpc.rpc.next_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-              "metric": "regionserver.Server.Get_num_ops",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/bulkLoadHFiles_num_ops": {
-              "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/ping_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatency_avg_time": {
-              "metric": "hbase.regionserver.fsReadLatency_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/flushSize_num_ops": {
-              "metric": "hbase.regionserver.flushSize_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/balanceSwitch_avg_time": {
-              "metric": "rpc.rpc.balanceSwitch_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/putRequestLatency_max": {
-              "metric": "regionserver.Server.Mutate_max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.callQueueLen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openRegion_num_ops": {
-              "metric": "rpc.rpc.openRegion_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsSyncLatency_num_ops": {
-              "metric": "hbase.regionserver.fsSyncLatency_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getOnlineRegions_avg_time": {
-              "metric": "rpc.rpc.getOnlineRegions_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/move_num_ops": {
-              "metric": "rpc.rpc.move_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/stop_num_ops": {
-              "metric": "rpc.rpc.stop_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/replicateLogEntries_avg_time": {
-              "metric": "rpc.rpc.replicateLogEntries_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_mean": {
-              "metric": "regionserver.Server.Get_mean",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/get_avg_time": {
-              "metric": "rpc.rpc.get_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/multi_num_ops": {
-              "metric": "rpc.rpc.multi_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/next/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.next.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/addToOnlineRegions_avg_time": {
-              "metric": "rpc.rpc.addToOnlineRegions_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/deleteColumn_avg_time": {
-              "metric": "rpc.rpc.deleteColumn_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/regions": {
-              "metric": "regionserver.Server.regionCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/bulkLoadHFiles_avg_time": {
-              "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/stop/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/addToOnlineRegions_num_ops": {
-              "metric": "rpc.rpc.addToOnlineRegions_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/abort/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheFree": {
-              "metric": "regionserver.Server.blockCacheFreeSize",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/offline_avg_time": {
-              "metric": "rpc.rpc.offline_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/unlockRow_avg_time": {
-              "metric": "rpc.rpc.unlockRow_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/blockCacheMissCount": {
-              "metric": "regionserver.Server.blockCacheMissCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/getCatalogTracker_num_ops": {
-              "metric": "rpc.rpc.getCatalogTracker_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/flushQueueSize": {
-              "metric": "regionserver.Server.flushQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/close/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.close.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/execCoprocessor_avg_time": {
-              "metric": "rpc.rpc.execCoprocessor_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_mean",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/createTable_num_ops": {
-              "metric": "rpc.rpc.createTable_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getConfiguration_num_ops": {
-              "metric": "rpc.rpc.getConfiguration_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isStopped_avg_time": {
-              "metric": "rpc.rpc.isStopped_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rollHLogWriter_avg_time": {
-              "metric": "rpc.rpc.rollHLogWriter_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsSyncLatency_avg_time": {
-              "metric": "hbase.regionserver.fsSyncLatency_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-              "metric": "regionserver.Server.Delete_mean",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_mean": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_mean",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-              "metric": "regionserver.Server.staticIndexSize",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getFromOnlineRegions_num_ops": {
-              "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-              "metric": "regionserver.Server.mutationsWithoutWALCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/get/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.get.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_median": {
-              "metric": "regionserver.Server.Get_median",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/openScanner_avg_time": {
-              "metric": "rpc.rpc.openScanner_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcSlowResponse_num_ops": {
-              "metric": "rpc.rpc.RpcSlowResponse_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/isAborted_avg_time": {
-              "metric": "rpc.rpc.isAborted_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/flushSize_avg_time": {
-              "metric": "hbase.regionserver.flushSize_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/commitPending_avg_time": {
-              "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getClosestRowBefore_avg_time": {
-              "metric": "rpc.rpc.getClosestRowBefore_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_max": {
-              "metric": "regionserver.Server.Delete_max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/get/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.get.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/put_num_ops": {
-              "metric": "rpc.rpc.put_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/move_avg_time": {
-              "metric": "rpc.rpc.move_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/percentFilesLocal": {
-              "metric": "regionserver.Server.percentFilesLocal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatency_avg_time": {
-              "metric": "hbase.regionserver.fsWriteLatency_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/increment/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/getTask_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/addColumn_num_ops": {
-              "metric": "rpc.rpc.addColumn_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/maxMemoryM": {
-              "metric": "jvm.metrics.maxMemoryM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getOnlineRegions_num_ops": {
-              "metric": "rpc.rpc.getOnlineRegions_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/flushTime_avg_time": {
-              "metric": "hbase.regionserver.flushTime_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/done_num_ops": {
-              "metric": "rpcdetailed.rpcdetailed.done_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolVersion_num_ops": {
-              "metric": "rpc.rpc.getProtocolVersion_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/unlockRow_num_ops": {
-              "metric": "rpc.rpc.unlockRow_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/slowGetCount": {
-              "metric": "regionserver.Server.slowGetCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
-              "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/shutdown_avg_time": {
-              "metric": "rpc.rpc.shutdown_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/regionServerStartup_num_ops": {
-              "metric": "rpc.rpc.regionServerStartup_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/requests": {
-              "metric": "regionserver.Server.totalRequestCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile": {
-              "metric": "hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/storefiles": {
-              "metric": "regionserver.Server.storeFileCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/next_num_ops": {
-              "metric": "rpc.rpc.next_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
-              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/slowDeleteCount": {
-              "metric": "regionserver.Server.slowDeleteCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndDelete_avg_time": {
-              "metric": "rpc.rpc.checkAndDelete_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getHServerInfo_avg_time": {
-              "metric": "rpc.rpc.getHServerInfo_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getZooKeeper_avg_time": {
-              "metric": "rpc.rpc.getZooKeeper_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/hlogFileCount": {
-              "metric": "hbase.regionserver.hlogFileCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
-              "metric": "regionserver.Server.Get_95th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
-              "metric": "regionserver.Server.Delete_95th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/memstoreSizeMB": {
-              "metric": "regionserver.Server.memStoreSize",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/fsReadLatencyHistogram_median": {
-              "metric": "hbase.regionserver.fsReadLatencyHistogram_median",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolSignature_num_ops": {
-              "metric": "rpc.rpc.getProtocolSignature_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
-              "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
-              "metric": "regionserver.Server.Delete_75th_percentile",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.rpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
-              "metric": "regionserver.Server.staticBloomSize",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/checkAndPut_num_ops": {
-              "metric": "rpc.rpc.checkAndPut_num_ops",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/increment_avg_time": {
-              "metric": "rpc.rpc.increment_avg_time",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/hbase/regionserver/slowPutCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/percentFilesLocal": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_min": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/blockCacheFree": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/blockCacheMissCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/flushQueueSize": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/slowAppendCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/blockCacheSize": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/slowIncrementCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/blockCacheEvictedCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/compactionQueueSize": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/putRequestLatency_median": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/slowGetCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/blockCacheCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/readRequestsCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/putRequestLatency_min": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/storefileIndexSizeMB": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_median": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/putRequestLatency_max": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/putRequestLatency_mean": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/requests": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/storefiles": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/writeRequestsCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/getRequestLatency_median": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/slowDeleteCount": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/hbase/regionserver/stores": {
-              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "

<TRUNCATED>
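
Each entry in the (truncated) metrics definition above maps an Ambari-side metric path to a source metric name plus two flags: "temporal" (the value can be charted from the metrics store) and "pointInTime" (the value can be read live). Note how this "ganglia" block marks the regionserver.Server.* entries temporal-only, while the "jmx" block that follows exposes the same names as pointInTime-only. A minimal sketch, assuming plain Python and nothing from Ambari, of sanity-checking one such map:

    import json

    def validate_metrics_map(metrics):
        # Every entry must carry a source metric name and both boolean flags.
        for path, spec in metrics.items():
            assert isinstance(spec.get("metric"), str), path
            assert isinstance(spec.get("pointInTime"), bool), path
            assert isinstance(spec.get("temporal"), bool), path

    sample = json.loads('''{
      "metrics/jvm/gcCount": {
        "metric": "jvm.JvmMetrics.GcCount",
        "pointInTime": true,
        "temporal": true
      }
    }''')
    validate_metrics_map(sample)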

[10/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';
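
The three Pig statements above are the whole smoke test: load a ':'-delimited passwd file, project the first field, and store the result. The backslash in \$0 appears intended to keep the '$' literal through shell interpolation so that Pig itself still sees the positional field. A hedged sketch, assuming 'pig' is on the PATH and borrowing nothing from the stack scripts, of driving the same check from Python:

    import shutil
    import subprocess

    def run_pig_smoke(script_path="/tmp/pigSmoke.sh"):
        # Stage the input the script expects, then let Pig's exit code decide.
        shutil.copy("/etc/passwd", "passwd")
        subprocess.run(["pig", "-x", "local", script_path], check=True)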

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh
deleted file mode 100644
index da0f60b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 hive --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
-echo $!|cat>$3
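
The one-liner above takes five positional arguments: $1 = stdout log, $2 = stderr log, $3 = pid file, $4 = HIVE_CONF_DIR, $5 = Hive log directory. It backgrounds the metastore and writes the child's pid ($!) into the pid file so later status checks can find the process. A sketch of the same call shape from Python; the script location here is an assumption, not the path Ambari actually stages it to:

    import subprocess

    def start_metastore(outfile, errfile, pidfile, conf_dir, log_dir,
                        script="/tmp/startMetastore.sh"):
        # Argument order mirrors $1..$5 in the shell script above.
        subprocess.run(
            ["bash", script, outfile, errfile, pidfile, conf_dir, log_dir],
            check=True)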

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh
deleted file mode 100644
index e26148b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-export no_proxy=$ttonhost
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# NOT SURE?? SUHAS
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
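
The script above probes WebHCat's status endpoint on port 50111 with an (optionally kinit-prefixed) curl and scrapes the HTTP code out of the '-w' trailer. Note the bare 'exit 0' right after the status check: as committed, the ddl and pig sections below it are dead code, so the smoke test really only verifies that /templeton/v1/status answers 200. A hedged Python sketch of that status probe (the 'requests' dependency is an assumption, and Kerberos/SPNEGO negotiation is omitted):

    import requests

    def templeton_status_ok(host, port=50111, timeout=30):
        # Mirrors the curl probe: GET /templeton/v1/status, expect HTTP 200.
        url = "http://%s:%d/templeton/v1/status" % (host, port)
        return requests.get(url, timeout=timeout).status_code == 200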

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat.py
deleted file mode 100644
index 31c1673..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hcat():
-  import params
-
-  Directory(params.hive_conf_dir,
-            recursive=True,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-
-  Directory(params.hcat_conf_dir,
-            recursive=True,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-  Directory(params.hcat_pid_dir,
-            owner=params.webhcat_user,
-            recursive=True
-  )
-
-  XmlConfig("hive-site.xml",
-            conf_dir=params.hive_client_conf_dir,
-            configurations=params.config['configurations']['hive-site'],
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-  File(format("{hcat_conf_dir}/hcat-env.sh"),
-       owner=params.hcat_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hcat_env_sh_template)
-  )
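
hcat() above is declarative: each Directory/XmlConfig/File resource states a desired end state (path, owner, group, mode, rendered content) and the agent converges the host to it, with recursive=True behaving like mkdir -p. A rough plain-Python equivalent of just the Directory resource, assuming the process runs as root so the ownership change succeeds:

    import os
    import shutil

    def ensure_dir(path, owner, group, mode=0o755):
        # Converge: create if missing, then enforce mode and ownership.
        os.makedirs(path, exist_ok=True)
        os.chmod(path, mode)
        shutil.chown(path, user=owner, group=group)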

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_client.py
deleted file mode 100644
index 8b5921a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hcat import hcat
-
-class HCatClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    hcat()
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HCatClient().execute()
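
HCatClient above shows the standard command-script shape: subclass Script, implement the lifecycle methods the agent may call (install, configure, status, ...), and let execute() dispatch to the one named in the incoming command; pure client components report "no status" by raising ClientComponentHasNoStatus. A skeletal sketch of that dispatch, much simplified from the real resource_management Script class:

    import sys

    class ClientComponentHasNoStatus(Exception):
        pass

    class Script(object):
        def execute(self):
            # Simplified: the agent passes the command name as the first argument.
            command = sys.argv[1]
            getattr(self, command)()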

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_service_check.py
deleted file mode 100644
index 081352a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hcat_service_check.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import get_unique_id_and_date
-
-def hcat_service_check():
-    import params
-    unique = get_unique_id_and_date()
-    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
-    test_cmd = format("fs -test -e {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-    else:
-      kinit_cmd = ""
-
-    File(format("{tmp_dir}/hcatSmoke.sh"),
-         content=StaticFile("hcatSmoke.sh"),
-         mode=0755
-    )
-
-    prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare")
-
-    Execute(prepare_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
-            logoutput=True)
-
-    if params.security_enabled:
-      ExecuteHadoop(test_cmd,
-                    user=params.hdfs_user,
-                    logoutput=True,
-                    conf_dir=params.hadoop_conf_dir,
-                    security_enabled=params.security_enabled,
-                    kinit_path_local=params.kinit_path_local,
-                    keytab=params.hdfs_user_keytab,
-                    principal=params.hdfs_principal_name,
-                    bin_dir=params.execute_path
-      )
-    else:
-      ExecuteHadoop(test_cmd,
-                    user=params.hdfs_user,
-                    logoutput=True,
-                    conf_dir=params.hadoop_conf_dir,
-                    security_enabled=params.security_enabled,
-                    kinit_path_local=params.kinit_path_local,
-                    keytab=params.hdfs_user_keytab,
-                    bin_dir=params.execute_path
-      )
-
-    cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup")
-
-    Execute(cleanup_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
-            logoutput=True
-    )
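
The check above follows a prepare/verify/cleanup shape: hcatSmoke.sh creates a table, the 'fs -test -e' on the warehouse path proves it materialized in HDFS, and the cleanup call drops it again. Execute(tries=3, try_sleep=5) is a retry loop; a standalone sketch of that helper, with shell=True command strings as illustrative assumptions:

    import subprocess
    import time

    def run_with_retries(cmd, tries=3, try_sleep=5):
        # Retry on non-zero exit, sleep between attempts, raise after the last.
        for attempt in range(1, tries + 1):
            if subprocess.run(cmd, shell=True).returncode == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("failed after %d tries: %s" % (tries, cmd))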

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive.py
deleted file mode 100644
index e388ee5..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-
-def hive(name=None):
-  import params
-
-  if name == 'hiveserver2':
-
-    params.HdfsDirectory(params.hive_apps_whs_dir,
-                         action="create_delayed",
-                         owner=params.hive_user,
-                         mode=0777
-    )
-    params.HdfsDirectory(params.hive_hdfs_user_dir,
-                         action="create_delayed",
-                         owner=params.hive_user,
-                         mode=params.hive_hdfs_user_mode
-    )
-    params.HdfsDirectory(None, action="create")
-  
-  # We should change configurations for client as well as for server.
-  # The reason is that stale-configs are service-level, not component.
-  for conf_dir in params.hive_conf_dirs_list:
-    fill_conf_dir(conf_dir)
-
-  XmlConfig("hive-site.xml",
-            conf_dir=params.hive_config_dir,
-            configurations=params.config['configurations']['hive-site'],
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hive_env_sh_template)
-  )
-
-  if name == 'metastore' or name == 'hiveserver2':
-    jdbc_connector()
-    
-  environment = {
-    "no_proxy": format("{ambari_server_hostname}")
-  }
-
-  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" "
-               "--retry 5 "
-               "{jdk_location}{check_db_connection_jar_name} "
-               "-o {check_db_connection_jar_name}'")
-
-  Execute(cmd,
-          not_if=format("[ -f {check_db_connection_jar_name} ]"),
-          environment = environment)
-
-  if name == 'metastore':
-    File(params.start_metastore_path,
-         mode=0755,
-         content=StaticFile('startMetastore.sh')
-    )
-    if params.init_metastore_schema:
-      create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
-                                 "{hive_bin}/schematool -initSchema "
-                                 "-dbType {hive_metastore_db_type} "
-                                 "-userName {hive_metastore_user_name} "
-                                 "-passWord {hive_metastore_user_passwd!p}")
-
-      check_schema_created_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
-                                        "{hive_bin}/schematool -info "
-                                        "-dbType {hive_metastore_db_type} "
-                                        "-userName {hive_metastore_user_name} "
-                                        "-passWord {hive_metastore_user_passwd!p}")
-
-      Execute(create_schema_cmd,
-              not_if = check_schema_created_cmd
-      )
-  elif name == 'hiveserver2':
-    File(params.start_hiveserver2_path,
-         mode=0755,
-         content=Template(format('{start_hiveserver2_script}'))
-    )
-
-  if name != "client":
-    crt_directory(params.hive_pid_dir)
-    crt_directory(params.hive_log_dir)
-    crt_directory(params.hive_var_lib)
-
-def fill_conf_dir(component_conf_dir):
-  import params
-  
-  Directory(component_conf_dir,
-            owner=params.hive_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=component_conf_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-
-  crt_file(format("{component_conf_dir}/hive-default.xml.template"))
-  crt_file(format("{component_conf_dir}/hive-env.sh.template"))
-
-  log4j_exec_filename = 'hive-exec-log4j.properties'
-  if params.log4j_exec_props is not None:
-    File(format("{component_conf_dir}/{log4j_exec_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=params.log4j_exec_props
-    )
-  elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
-    File(format("{component_conf_dir}/{log4j_exec_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
-    )
-
-  log4j_filename = 'hive-log4j.properties'
-  if params.log4j_props is not None:
-    File(format("{component_conf_dir}/{log4j_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=params.log4j_props
-    )
-  elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
-    File(format("{component_conf_dir}/{log4j_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
-    )
-
-
-def crt_directory(name):
-  import params
-
-  Directory(name,
-            recursive=True,
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-
-
-def crt_file(name):
-  import params
-
-  File(name,
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-
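-# Stage the metastore JDBC driver jar into the Hive lib dir. MySQL and Postgres
-# drivers are expected under /usr/share/java; the Oracle driver is downloaded
-# from the Ambari server first.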
-def jdbc_connector():
-  import params
-
-  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
-    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            environment= {'PATH' : params.execute_path },
-            path=["/bin", "/usr/bin/"])
-  elif params.hive_jdbc_driver == "org.postgresql.Driver":
-    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            environment= {'PATH' : params.execute_path },
-            path=["/bin", "usr/bin/"])
-
-  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-    environment = {
-      "no_proxy": format("{ambari_server_hostname}")
-    }
-
-    cmd = format(
-      "mkdir -p {artifact_dir} ; "
-      "curl -kf -x \"\" --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
-      "cp {driver_curl_target} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            path=["/bin", "/usr/bin/"],
-            environment=environment)

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_client.py
deleted file mode 100644
index 499f632..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import sys
-from resource_management import *
-
-from hive import hive
-
-class HiveClient(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='client')
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_metastore.py
deleted file mode 100644
index 6ee5507..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_metastore.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-from mysql_service import mysql_service
-
-class HiveMetastore(Script):
-
-  def install(self, env):
-    import params
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='metastore')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'metastore',
-                   action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'metastore',
-                   action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    # Check that the metastore process referenced by the pid file is running
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_server.py
deleted file mode 100644
index 34f2d96..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_server.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hive import hive
-from hive_service import hive_service
-from install_jars import install_tez_jars
-
-class HiveServer(Script):
-
-  def install(self, env):
-    import params
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='hiveserver2')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    
-    install_tez_jars() # Put tez jars in hdfs
-
-    hive_service( 'hiveserver2',
-                  action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'hiveserver2',
-                  action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    # Check that the HiveServer2 process referenced by the pid file is running
-    check_process_status(pid_file)
-
-
-if __name__ == "__main__":
-  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_service.py
deleted file mode 100644
index 5463df4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/hive_service.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-import time
-from resource_management.core.shell import call
-
-def hive_service(
-    name,
-    action='start'):
-
-  import params
-
-  if name == 'metastore':
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    cmd = format(
-      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
-  elif name == 'hiveserver2':
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    cmd = format(
-      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
-
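-  # Guard used as not_if below: the pid file exists and the process it names is alive.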
-  process_id_exists = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    if name == 'hiveserver2':
-      check_fs_root()
-
-    daemon_cmd = cmd
-
-    Execute(daemon_cmd,
-            user=params.hive_user,
-            environment={'HADOOP_HOME': params.hadoop_home},
-            path=params.execute_path,
-            not_if=process_id_exists
-    )
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
-       params.hive_jdbc_driver == "org.postgresql.Driver" or \
-       params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-      
-      db_connection_check_command = format(
-        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
-      
-      Execute(db_connection_check_command,
-              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
-      
-    # AMBARI-5800 - wait for the server to come up instead of just the PID existence
-    if name == 'hiveserver2':
-      SOCKET_WAIT_SECONDS = 120
-      address=params.hive_server_host
-      port=int(params.hive_server_port)
-      
-      start_time = time.time()
-      end_time = start_time + SOCKET_WAIT_SECONDS
-
-      is_service_socket_valid = False
-      print "Waiting for the Hive server to start..."
-      while time.time() < end_time:
-        if check_thrift_port_sasl(address, port, 2, security_enabled=params.security_enabled):
-          is_service_socket_valid = True
-          break
-        else:
-          time.sleep(2)
-
-      elapsed_time = time.time() - start_time    
-      
-      if not is_service_socket_valid:
-        raise Fail("Connection to Hive server %s on port %s failed after %d seconds" % (address, port, elapsed_time))
-      
-      print "Successfully connected to Hive at %s on port %s after %d seconds" % (address, port, elapsed_time)    
-            
-  elif action == 'stop':
-    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
-    Execute(demon_cmd,
-            not_if = format("! ({process_id_exists})")
-    )
-
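-# Compare the FS root recorded in the metastore against fs.defaultFS and
-# migrate stale hdfs:// locations with 'metatool -updateLocation'.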
-def check_fs_root():
-  import params  
-  fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
-  cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs://")
-  code, out = call(cmd, user=params.hive_user)
-  if code == 0 and fs_root_url.strip() != out.strip():
-    cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
-    Execute(cmd,
-            environment= {'PATH' : params.execute_path },
-            user=params.hive_user)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/install_jars.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/install_jars.py
deleted file mode 100644
index 08a0a50..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/install_jars.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import os
-import fnmatch
-
-def install_tez_jars():
-  import params
-  
-  destination_hdfs_dirs = get_tez_hdfs_dir_paths(params.tez_lib_uris)
-
-  # If tez libraries are to be stored in hdfs
-  if destination_hdfs_dirs:
-    for hdfs_dir in destination_hdfs_dirs:
-      params.HdfsDirectory(hdfs_dir,
-                          action="create_delayed",
-                          owner=params.tez_user,
-                          mode=0755
-      )
-    params.HdfsDirectory(None, action="create")
-
-    if params.security_enabled:
-      kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
-    else:
-      kinit_if_needed = ""
-
-    if kinit_if_needed:
-      Execute(kinit_if_needed,
-              user=params.tez_user,
-              path='/bin'
-      )
-
-    app_dir_path = None
-    lib_dir_path = None
-
-    if len(destination_hdfs_dirs) > 0:
-      for path in destination_hdfs_dirs:
-        if 'lib' in path:
-          lib_dir_path = path
-        else:
-          app_dir_path = path
-
-    if app_dir_path:
-      for scr_file, dest_file in params.app_dir_files.iteritems():
-        CopyFromLocal(scr_file,
-                      mode=0755,
-                      owner=params.tez_user,
-                      dest_dir=app_dir_path,
-                      dest_file=dest_file,
-                      kinnit_if_needed=kinit_if_needed,
-                      hdfs_user=params.hdfs_user,
-                      hadoop_bin_dir=params.hadoop_bin_dir,
-                      hadoop_conf_dir=params.hadoop_conf_dir
-        )
-
-    if lib_dir_path:
-      CopyFromLocal(params.tez_local_lib_jars,
-                    mode=0755,
-                    owner=params.tez_user,
-                    dest_dir=lib_dir_path,
-                    kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user,
-                    hadoop_bin_dir=params.hadoop_bin_dir,
-                    hadoop_conf_dir=params.hadoop_conf_dir
-      )
-
-
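-# Derive the HDFS directories referenced by tez.lib.uris, stripping the
-# hdfs:// prefix; tez.tar.gz entries contribute their parent directory.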
-def get_tez_hdfs_dir_paths(tez_lib_uris = None):
-  hdfs_path_prefix = 'hdfs://'
-  lib_dir_paths = []
-  if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
-    dir_paths = tez_lib_uris.split(',')
-    for path in dir_paths:
-      if not "tez.tar.gz" in path:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
-        lib_dir_paths.append(lib_dir_path)
-      else:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_paths.append(os.path.dirname(lib_dir_path))
-
-  return lib_dir_paths

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_server.py
deleted file mode 100644
index 6df6059..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_server.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from mysql_service import mysql_service
-
-class MysqlServer(Script):
-
-  def install(self, env):
-    import params
-    
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=params.daemon_name, action='start')
-
-    File(params.mysql_adduser_path,
-         mode=0755,
-         content=StaticFile('addMysqlUser.sh')
-    )
-
-    cmd = format("bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {mysql_host[0]}")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    )
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=params.daemon_name, action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=params.daemon_name, action = 'stop')
-
-  def status(self, env):
-    import status_params
-    mysql_service(daemon_name=status_params.daemon_name, action = 'status')
-
-if __name__ == "__main__":
-  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_service.py
deleted file mode 100644
index 11bbdd8..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/mysql_service.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def mysql_service(daemon_name=None, action='start'):
-  status_cmd = format('service {daemon_name} status | grep running')
-  cmd = format('service {daemon_name} {action}')
-
-  if action == 'status':
-    Execute(status_cmd)
-  elif action == 'stop':
-    Execute(cmd,
-            logoutput = True,
-            only_if = status_cmd
-    )
-  elif action == 'start':
-    # Hive connects to MySQL over the network, so make mysqld listen on all interfaces
-    replace_bind_address = format("sed -i 's|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|' {mysql_configname}")  
-    Execute(replace_bind_address)
-    
-    Execute(cmd,
-      logoutput = True,
-      not_if = status_cmd
-    )
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/params.py
deleted file mode 100644
index 835e018..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/params.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-phd_stack_version = config['hostLevelParams']['stack_version']
-
-#hadoop params
-if rpm_version:
-  hadoop_bin_dir = "/usr/phd/current/hadoop-client/bin"
-  hadoop_home = '/usr/phd/current/hadoop-client'
-  hadoop_streeming_jars = "/usr/phd/current/hadoop-mapreduce-client/hadoop-streaming-*.jar"
-  hive_bin = '/usr/phd/current/hive-client/bin'
-  hive_lib = '/usr/phd/current/hive-client/lib'
-  tez_local_api_jars = '/usr/phd/current/tez-client/tez*.jar'
-  tez_local_lib_jars = '/usr/phd/current/tez-client/lib/*.jar'
-  tez_tar_file = "/usr/phd/current/tez-client/lib/tez*.tar.gz"
-  pig_tar_file = '/usr/phd/current/pig-client/pig.tar.gz'
-  hive_tar_file = '/usr/phd/current/hive-client/hive.tar.gz'
-  sqoop_tar_file = '/usr/phd/current/sqoop-client/sqoop*.tar.gz'
-
-  hcat_lib = '/usr/phd/current/hive/hive-hcatalog/share/hcatalog'
-  webhcat_bin_dir = '/usr/phd/current/hive-hcatalog/sbin'
-
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = '/usr'
-  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
-  hive_bin = '/usr/lib/hive/bin'
-  hive_lib = '/usr/lib/hive/lib/'
-  tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-  tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-  tez_tar_file = "/usr/lib/tez/tez*.tar.gz"
-  pig_tar_file = '/usr/share/PHD-webhcat/pig.tar.gz'
-  hive_tar_file = '/usr/share/PHD-webhcat/hive.tar.gz'
-  sqoop_tar_file = '/usr/share/PHD-webhcat/sqoop*.tar.gz'
-
-  if str(phd_stack_version).startswith('2.0'):
-    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
-  # for newer versions
-  else:
-    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
-    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hive_conf_dir = "/etc/hive/conf"
-hive_client_conf_dir = "/etc/hive/conf"
-hive_server_conf_dir = '/etc/hive/conf.server'
-
-# for newer versions
-hcat_conf_dir = '/etc/hive-hcatalog/conf'
-config_dir = '/etc/hive-webhcat/conf'
-
-execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
-
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
-
-#users
-hive_user = config['configurations']['hive-env']['hive_user']
-#JDBC driver jar name
-hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
-if hive_jdbc_driver == "com.mysql.jdbc.Driver":
-  jdbc_jar_name = "mysql-connector-java.jar"
-  jdbc_symlink_name = "mysql-jdbc-driver.jar"
-elif hive_jdbc_driver == "org.postgresql.Driver":
-  jdbc_jar_name = "postgresql-jdbc.jar"
-  jdbc_symlink_name = "postgres-jdbc-driver.jar"
-elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-  jdbc_jar_name = "ojdbc6.jar"
-  jdbc_symlink_name = "oracle-jdbc-driver.jar"
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-#common
-hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
-hive_var_lib = '/var/lib/hive'
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
-hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
-hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
-smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-
-#hive_env
-hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
-hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
-hive_pid_dir = status_params.hive_pid_dir
-hive_pid = status_params.hive_pid
-#Default conf dir for client
-hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
-
-if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
-  hive_config_dir = hive_server_conf_dir
-else:
-  hive_config_dir = hive_client_conf_dir
-
-#hive-site
-hive_database_name = config['configurations']['hive-env']['hive_database_name']
-
-#Starting hiveserver2
-start_hiveserver2_script = 'startHiveserver2.sh.j2'
-
-##Starting metastore
-start_metastore_script = 'startMetastore.sh'
-hive_metastore_pid = status_params.hive_metastore_pid
-java_share_dir = '/usr/share/java'
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
-
-hdfs_user =  config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-
-target = format("{hive_lib}/{jdbc_jar_name}")
-
-jdk_location = config['hostLevelParams']['jdk_location']
-driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
-
-start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
-start_metastore_path = format("{tmp_dir}/start_metastore_script")
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
-java64_home = config['hostLevelParams']['java_home']
-
-##### MYSQL
-
-db_name = config['configurations']['hive-env']['hive_database_name']
-mysql_user = "mysql"
-mysql_group = 'mysql'
-mysql_host = config['clusterHostInfo']['hive_mysql_host']
-
-mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
-
-##### POSTGRES
-postgresql_adduser_file = "addPostgreSQLUser.sh"
-postgresql_adduser_path = format("{tmp_dir}/{postgresql_adduser_file}")
-postgresql_host = config['clusterHostInfo']['hive_postgresql_host']
-postgresql_pghba_conf_path = "/var/lib/pgsql/data/pg_hba.conf"
-postgresql_conf_path = "/var/lib/pgsql/data/postgresql.conf"
-postgresql_daemon_name = status_params.postgresql_daemon_name
-
-######## Metastore Schema
-init_metastore_schema = True
-
-########## HCAT
-
-hcat_dbroot = hcat_lib
-
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
-
-hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-hcat_env_sh_template = config['configurations']['hcat-env']['content']
-
-#hive-log4j.properties.template
-if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
-  log4j_props = config['configurations']['hive-log4j']['content']
-else:
-  log4j_props = None
-
-#hive-exec-log4j.properties.template
-if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
-  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
-else:
-  log4j_exec_props = None
-
-daemon_name = status_params.daemon_name
-hive_env_sh_template = config['configurations']['hive-env']['content']
-
-hive_hdfs_user_dir = format("/user/{hive_user}")
-hive_hdfs_user_mode = 0700
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
-#for create_hdfs_directory
-hostname = config["hostname"]
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-# Tez libraries
-tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-tez_user = config['configurations']['tez-env']['tez_user']
-
-if System.get_instance().os_family == "ubuntu":
-  mysql_configname = '/etc/mysql/my.cnf'
-else:
-  mysql_configname = '/etc/my.cnf'
-
-# Hive security
-hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
-
-mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-
-# There are other packages that contain /usr/share/java/mysql-connector-java.jar (like libmysql-java),
-# trying to install mysql-connector-java upon them can cause packages to conflict.
-if os.path.exists(mysql_jdbc_driver_jar):
-  hive_exclude_packages = ['mysql-connector-java']
-else:  
-  hive_exclude_packages = []
-
-########################################################
-########### WebHCat related params #####################
-########################################################
-
-webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
-templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-templeton_pid_dir = status_params.hcat_pid_dir
-
-webhcat_pid_file = status_params.webhcat_pid_file
-
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-
-hcat_hdfs_user_dir = format("/user/{hcat_user}")
-hcat_hdfs_user_mode = 0755
-webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-webhcat_hdfs_user_mode = 0755
-#for create_hdfs_directory
-security_param = "true" if security_enabled else "false"
-
-if str(phd_stack_version).startswith('2.0') or str(phd_stack_version).startswith('2.1'):
-  app_dir_files = {tez_local_api_jars:None}
-else:
-  app_dir_files = {
-              tez_local_api_jars:None,
-              tez_tar_file:"tez.tar.gz"
-  }
-
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_server.py
deleted file mode 100644
index 6732573..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_server.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from postgresql_service import postgresql_service
-
-class PostgreSQLServer(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    # init the database; '|| :' makes the command return 0 even when the database
-    # has already been initialized (e.g. when the postgresql server is colocated with the ambari server)
-    Execute(format("service {postgresql_daemon_name} initdb || :"))
-
-    # update the configuration files
-    self.update_pghba_conf(env)
-    self.update_postgresql_conf(env)
-
-    # Reload the settings and start the postgresql server for the changes to take effect
-    # Note: don't restart the postgresql server here. When the Ambari server and the hive metastore are
-    # on the same machine they share the same postgresql instance, and restarting it could drop the Ambari server's database connections.
-    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'reload')
-
-    # ensure the postgresql server is started, because adding the hive metastore user requires a running server.
-    self.start(env)
-
-    # create the database and hive_metastore_user
-    File(params.postgresql_adduser_path,
-         mode=0755,
-         content=StaticFile(format("{postgresql_adduser_file}"))
-    )
-
-    cmd = format("bash -x {postgresql_adduser_path} {postgresql_daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {db_name}")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    )
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'stop')
-
-  def status(self, env):
-    import status_params
-    postgresql_service(postgresql_daemon_name=status_params.postgresql_daemon_name, action = 'status')
-
-  def update_postgresql_conf(self, env):
-    import params
-    env.set_params(params)
-
-    # change the listen_address to *
-    Execute(format("sed -i '/^[[:space:]]*listen_addresses[[:space:]]*=.*/d' {postgresql_conf_path}"))
-    Execute(format("echo \"listen_addresses = '*'\" | tee -a {postgresql_conf_path}"))
-
-    # change the standard_conforming_string to off
-    Execute(format("sed -i '/^[[:space:]]*standard_conforming_strings[[:space:]]*=.*/d' {postgresql_conf_path}"))
-    Execute(format("echo \"standard_conforming_strings = off\" | tee -a {postgresql_conf_path}"))
-
-  def update_pghba_conf(self, env):
-    import params
-    env.set_params(params)
-
-    # trust hive_metastore_user and postgres locally
-    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
-    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
-    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
-    Execute(format("echo \"local   all   postgres   trust\" | tee -a {postgresql_pghba_conf_path}"))
-    Execute(format("echo \"local   all   \\\"{hive_metastore_user_name}\\\" trust\" | tee -a {postgresql_pghba_conf_path}"))
-
-    # trust hive_metastore_user and postgres via local interface
-    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
-    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
-    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
-    Execute(format("echo \"host    all   postgres         0.0.0.0/0       trust\" | tee -a {postgresql_pghba_conf_path}"))
-    Execute(format("echo \"host    all   \\\"{hive_metastore_user_name}\\\"         0.0.0.0/0       trust\" | tee -a {postgresql_pghba_conf_path}"))
-
-if __name__ == "__main__":
-  PostgreSQLServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_service.py
deleted file mode 100644
index cc7b4cc..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/postgresql_service.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def postgresql_service(postgresql_daemon_name=None, action='start'):
-  status_cmd = format('service {postgresql_daemon_name} status | grep running')
-  cmd = format('service {postgresql_daemon_name} {action}')
-
-  if action == 'status':
-    Execute(status_cmd)
-  elif action == 'stop':
-    Execute(cmd,
-            logoutput = True,
-            only_if = status_cmd
-    )
-  elif action == 'start':
-    Execute(cmd,
-      logoutput = True,
-      not_if = status_cmd
-    )
-  else:
-    Execute(cmd, logoutput = True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/service_check.py
deleted file mode 100644
index b75578b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/service_check.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import socket
-import sys
-
-from hcat_service_check import hcat_service_check
-from webhcat_service_check import webhcat_service_check
-
-class HiveServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    address = params.hive_server_host
-    port = int(params.hive_server_port)
-    print "Test connectivity to hive server"
-    if check_thrift_port_sasl(address, port, security_enabled=params.security_enabled):
-      print "Successfully connected to %s on port %s" % (address, port)
-    else:
-      print "Connection to %s on port %s failed" % (address, port)
-      sys.exit(1)
-
-    hcat_service_check()
-    webhcat_service_check()
-
-if __name__ == "__main__":
-  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/status_params.py
deleted file mode 100644
index 3e50761..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/status_params.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
-hive_pid = 'hive-server.pid'
-
-hive_metastore_pid = 'hive.pid'
-
-hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
-webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
-
-if System.get_instance().os_family == "suse" or System.get_instance().os_family == "ubuntu":
-  daemon_name = 'mysql'
-else:
-  daemon_name = 'mysqld'
-
-postgresql_daemon_name = "postgresql"

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat.py
deleted file mode 100644
index c6f41dd..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import sys
-import os.path
-import glob
-
-
-def webhcat():
-  import params
-
-  params.HdfsDirectory(params.webhcat_apps_dir,
-                       action="create_delayed",
-                       owner=params.webhcat_user,
-                       mode=0755
-  )
-  if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-    params.HdfsDirectory(params.hcat_hdfs_user_dir,
-                         action="create_delayed",
-                         owner=params.hcat_user,
-                         mode=params.hcat_hdfs_user_mode
-    )
-  params.HdfsDirectory(params.webhcat_hdfs_user_dir,
-                       action="create_delayed",
-                       owner=params.webhcat_user,
-                       mode=params.webhcat_hdfs_user_mode
-  )
-  params.HdfsDirectory(None, action="create")
-
-  Directory(params.templeton_pid_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.templeton_log_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.config_dir,
-            recursive=True,
-            owner=params.webhcat_user,
-            group=params.user_group)
-
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['webhcat-site'],
-            configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
-            owner=params.webhcat_user,
-            group=params.user_group,
-  )
-
-  File(format("{config_dir}/webhcat-env.sh"),
-       owner=params.webhcat_user,
-       group=params.user_group,
-       content=InlineTemplate(params.webhcat_env_sh_template)
-  )
-
-  if params.security_enabled:
-    kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
-  else:
-    kinit_if_needed = ""
-
-  if kinit_if_needed:
-    Execute(kinit_if_needed,
-            user=params.webhcat_user,
-            path='/bin'
-    )
-
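-  # Publish the hadoop-streaming jar (and the pig/hive/sqoop tarballs, when
-  # present locally) into the WebHCat apps dir in HDFS for Templeton jobs.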
-  CopyFromLocal(params.hadoop_streeming_jars,
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=params.webhcat_apps_dir,
-                kinnit_if_needed=kinit_if_needed,
-                hdfs_user=params.hdfs_user,
-                hadoop_bin_dir=params.hadoop_bin_dir,
-                hadoop_conf_dir=params.hadoop_conf_dir
-  )
-
-  if (os.path.isfile(params.pig_tar_file)):
-    CopyFromLocal(params.pig_tar_file,
-                  owner=params.webhcat_user,
-                  mode=0755,
-                  dest_dir=params.webhcat_apps_dir,
-                  kinnit_if_needed=kinit_if_needed,
-                  hdfs_user=params.hdfs_user,
-                  hadoop_bin_dir=params.hadoop_bin_dir,
-                  hadoop_conf_dir=params.hadoop_conf_dir
-    )
-
-  if (os.path.isfile(params.hive_tar_file)):
-    CopyFromLocal(params.hive_tar_file,
-                  owner=params.webhcat_user,
-                  mode=0755,
-                  dest_dir=params.webhcat_apps_dir,
-                  kinnit_if_needed=kinit_if_needed,
-                  hdfs_user=params.hdfs_user,
-                  hadoop_bin_dir=params.hadoop_bin_dir,
-                  hadoop_conf_dir=params.hadoop_conf_dir
-    )
-
-  if (len(glob.glob(params.sqoop_tar_file)) > 0):
-    CopyFromLocal(params.sqoop_tar_file,
-                  owner=params.webhcat_user,
-                  mode=0755,
-                  dest_dir=params.webhcat_apps_dir,
-                  kinnit_if_needed=kinit_if_needed,
-                  hdfs_user=params.hdfs_user,
-                  hadoop_bin_dir=params.hadoop_bin_dir,
-                  hadoop_conf_dir=params.hadoop_conf_dir
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_server.py
deleted file mode 100644
index 088cb41..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-
-class WebHCatServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    webhcat()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    webhcat_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    webhcat_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.webhcat_pid_file)
-
-if __name__ == "__main__":
-  WebHCatServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service.py
deleted file mode 100644
index 41fb529..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-def webhcat_service(action='start'):
-  import params
-
-  cmd = format('env HADOOP_HOME={hadoop_home} {webhcat_bin_dir}/webhcat_server.sh')
-
-  if action == 'start':
-    demon_cmd = format('{cmd} start')
-    no_op_test = format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps `cat {webhcat_pid_file}` >/dev/null 2>&1')
-    Execute(demon_cmd,
-            user=params.webhcat_user,
-            not_if=no_op_test
-    )
-  elif action == 'stop':
-    demon_cmd = format('{cmd} stop')
-    Execute(demon_cmd,
-            user=params.webhcat_user
-    )
-    Execute(format('rm -f {webhcat_pid_file}'))
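
The no_op_test guard above is what makes 'start' idempotent: the daemon command runs only when the pid-file probe fails. A minimal sketch of that probe in plain Python, with no Ambari imports (the pid-file path is whatever the caller supplies):

import os

def already_running(pid_file):
  # Mirrors: ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1
  try:
    with open(pid_file) as f:
      pid = int(f.read().strip())
    os.kill(pid, 0)                   # signal 0 probes existence, kills nothing
    return True
  except (IOError, OSError, ValueError):
    return False

A start wrapper would launch the daemon only when already_running() returns False, which is exactly the role Execute's not_if plays here.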

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service_check.py
deleted file mode 100644
index 8d15e47..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/scripts/webhcat_service_check.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def webhcat_service_check():
-  import params
-  File(format("{tmp_dir}/templetonSmoke.sh"),
-       content= StaticFile('templetonSmoke.sh'),
-       mode=0755
-  )
-
-  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-               " {security_param} {kinit_path_local}",
-               smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-  Execute(cmd,
-          tries=3,
-          try_sleep=5,
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-          logoutput=True)
-
-
-
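
The tries=3/try_sleep=5 pair above gives the smoke test a few chances before failing the service check. A rough stand-in for that retry loop in plain Python (standard library only; the error message is illustrative):

import subprocess
import time

def run_with_retries(cmd, tries=3, try_sleep=5):
  for attempt in range(1, tries + 1):
    if subprocess.call(cmd, shell=True) == 0:
      return
    if attempt < tries:
      time.sleep(try_sleep)
  raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))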

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/templates/startHiveserver2.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/templates/startHiveserver2.sh.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/templates/startHiveserver2.sh.j2
deleted file mode 100644
index 641fb32..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/templates/startHiveserver2.sh.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.dir=$5"
-{% if hive_authorization_enabled == True and str(phd_stack_version).startswith('2.1') %}
-# HiveServer 2 -hiveconf options
-HIVE_SERVER2_OPTS="${HIVE_SERVER2_OPTS} -hiveconf hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator -hiveconf hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory "
-{% endif %}
-
-HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
-echo $!|cat>$3
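
The template's positional contract, inferred from how $1..$5 are used above: $1 stdout log, $2 stderr log, $3 pid file, $4 config dir, $5 log dir. A hedged Python sketch of the same detached launch, assuming a hiveserver2 executable on PATH (paths are whatever the caller supplies):

import os
import subprocess

def start_hiveserver2(out_log, err_log, pid_file, conf_dir, log_dir):
  env = dict(os.environ, HIVE_CONF_DIR=conf_dir)
  with open(out_log, "w") as out, open(err_log, "w") as err:
    proc = subprocess.Popen(
      ["hiveserver2", "-hiveconf", "hive.log.file=hiveserver2.log",
       "-hiveconf", "hive.log.dir=" + log_dir],
      env=env, stdout=out, stderr=err)
  with open(pid_file, "w") as f:      # plays the role of: echo $!|cat>$3
    f.write(str(proc.pid))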

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/configuration/nagios-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/configuration/nagios-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/configuration/nagios-env.xml
deleted file mode 100644
index fad8374..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/configuration/nagios-env.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <property-type>USER</property-type>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <property-type>GROUP</property-type>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_web_password</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property require-input = "true">
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index be8c75f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export old_mark_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ -d $old_mark_dir ]] ; then
-  mv ${old_mark_dir} ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su -s /bin/bash - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
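
The heart of the script is the per-directory emptiness test (ls $dir | wc -l | grep -q ^0$): formatting only proceeds when every configured NameNode dir is empty. The same check as a short Python sketch (the directory list is illustrative):

import os

def non_empty_dirs(name_dirs):
  return [d for d in name_dirs if os.path.isdir(d) and os.listdir(d)]

dirs = "/hadoop/hdfs/namenode".split(",")   # name_dirs arrives comma-separated
busy = non_empty_dirs(dirs)
if busy:
  print("Will not format the namenode; non-empty dirs: %s" % " ".join(busy))
else:
  print("Safe to run: hadoop namenode -format")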

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()
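
The deleted checker is Python 2 only (httplib and print statements). A sketch of the same probe against Python 3's http.client, keeping the original's assumption that any exception counts as an unreachable UI; host and port below are illustrative:

import http.client

def web_ui_up(host, port, timeout=10):
  try:
    conn = http.client.HTTPConnection(host, int(port), timeout=timeout)
    conn.request("GET", "/")
    status = conn.getresponse().status
    conn.close()
    return status == 200
  except Exception:
    return False

for host in ["c6401.ambari.apache.org"]:
  if not web_ui_up(host, 50070):
    raise SystemExit("Cannot access WEB UI on: http://%s:%s" % (host, 50070))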


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml
deleted file mode 100644
index 5730d4a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml
+++ /dev/null
@@ -1,184 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <description>YARN Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <description>YARN PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <value>yarn</value>
-    <property-type>USER</property-type>
-    <description>YARN User</description>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>min_user_id</name>
-    <value>1000</value>
-    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
-  </property>
-  <property>
-    <name>apptimelineserver_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-  </property>
-
-  <!-- yarn-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for yarn-env.sh file</description>
-    <value>
-export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-export JAVA_HOME={{java64_home}}
-
-# User for YARN daemons
-export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-# resolve links - $0 may be a softlink
-export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-# some Java parameters
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m
-
-# For setting YARN specific HEAP sizes please use this
-# Parameter and set appropriately
-YARN_HEAPSIZE={{yarn_heapsize}}
-
-# check envvars which might override default args
-if [ "$YARN_HEAPSIZE" != "" ]; then
-  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-fi
-
-# Resource Manager specific parameters
-
-# Specify the max Heapsize for the ResourceManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_RESOURCEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-# Specify the JVM options to be used when starting the ResourceManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_RESOURCEMANAGER_OPTS=
-
-# Node Manager specific parameters
-
-# Specify the max Heapsize for the NodeManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_NODEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-# Specify the max Heapsize for the HistoryManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1024.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_HISTORYSERVER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
-
-# Specify the JVM options to be used when starting the NodeManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_NODEMANAGER_OPTS=
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-
-# default log directory and file
-if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-fi
-if [ "$YARN_LOGFILE" = "" ]; then
-  YARN_LOGFILE='yarn.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$YARN_POLICYFILE" = "" ]; then
-  YARN_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi
-YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-    </value>
-  </property>
-
-</configuration>
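
A rough sketch of the heap-size fallback the comments in this template describe (a component-specific export wins, otherwise YARN_HEAPSIZE, otherwise the hard-coded -Xmx1000m). This is a simplification for illustration, not Hadoop's actual launcher logic:

def effective_xmx(component_heap_mb=None, yarn_heap_mb=1024, default_mb=1000):
  return "-Xmx%dm" % (component_heap_mb or yarn_heap_mb or default_mb)

assert effective_xmx(component_heap_mb=1024) == "-Xmx1024m"  # resourcemanager_heapsize
assert effective_xmx(component_heap_mb=None) == "-Xmx1024m"  # falls back to yarn_heapsize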

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-log4j.xml
deleted file mode 100644
index 8c44b9e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-log4j.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#Relative to Yarn Log Dir Prefix
-yarn.log.dir=.
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# LEVEL,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-site.xml
deleted file mode 100644
index 20052de..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,413 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- ResourceManager -->
-
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-    <description> The address of ResourceManager. </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description> Are acls enabled. </description>
-  </property>
-
-  <property>
-    <name>yarn.admin.acl</name>
-    <value></value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-  </property>
-
-  <!-- NodeManager -->
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio between virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and can
-      not start with numbers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-    <description>The auxiliary service class to use </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_${contid}. Each container directory will contain the files
-      stderr, stdout, and syslog generated by that container.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, that the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>Location to aggregate logs to. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose Yarn application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of Yarn applications' work directories are configurable with
-      the yarn.nodemanager.local-dirs property (see below), and the roots
-      of the Yarn applications' log directories are configurable with the
-      yarn.nodemanager.log-dirs property (see also below).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregated logs before deleting them. -1 disables deletion.
-      Be careful: if you set this too small, you will spam the name node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of disks that must be healthy for the nodemanager
-      to launch new containers. This applies to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs; i.e.,
-      if fewer healthy local-dirs (or log-dirs) are available than this
-      fraction requires, new containers will not be launched on this node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for AM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/etc/hadoop/conf/yarn.exclude</value>
-    <description>
-      Names a file that contains a list of hosts that are
-      not permitted to connect to the resource manager.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
-    <description>Indicate to clients whether timeline service is enabled or not.
-      If enabled, clients will put entities and events to the timeline server.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.store-class</name>
-    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbTimelineStore</value>
-    <description>
-      Store class name for timeline store
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.generic-application-history.store-class</name>
-    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
-    <description>
-      Store class name for history store, defaulting to file system store
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-    <value>/var/log/hadoop-yarn/timeline</value>
-    <description>
-      Store file name for leveldb timeline store
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.webapp.address</name>
-    <value>0.0.0.0:8188</value>
-    <description>
-      The http address of the timeline service web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.webapp.https.address</name>
-    <value>0.0.0.0:8190</value>
-    <description>
-      The https address of the timeline service web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.address</name>
-    <value>0.0.0.0:10200</value>
-    <description>
-      This is the default address on which the timeline server starts
-      its RPC server.
-    </description>
-  </property>
-  <property>
-    <description>Enable age off of timeline store data.</description>
-    <name>yarn.timeline-service.ttl-enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <description>Time to live for timeline store data in milliseconds.</description>
-    <name>yarn.timeline-service.ttl-ms</name>
-    <value>2678400000</value>
-  </property>
-  <property>
-    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
-    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
-    <value>300000</value>
-  </property>
-</configuration>
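
A short sketch of how stack scripts typically consume these values: the agent delivers them as strings in a nested config dict, so numeric properties need explicit conversion. The dict literal mirrors two defaults from this file and is illustrative, not the agent's actual payload:

config = {"configurations": {"yarn-site": {
  "yarn.nodemanager.resource.memory-mb": "5120",
  "yarn.scheduler.minimum-allocation-mb": "512",
}}}

yarn_site = config["configurations"]["yarn-site"]
nm_memory_mb = int(yarn_site["yarn.nodemanager.resource.memory-mb"])
min_alloc_mb = int(yarn_site["yarn.scheduler.minimum-allocation-mb"])
print("At most %d minimum-size containers per NodeManager" % (nm_memory_mb // min_alloc_mb))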

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metainfo.xml
deleted file mode 100644
index 6f11908..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,249 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <displayName>YARN</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.4.1.phd.3.0.0.0</version>
-      <components>
-
-        <component>
-          <name>APP_TIMELINE_SERVER</name>
-          <displayName>App Timeline Server</displayName>
-          <category>MASTER</category>
-          <cardinality>0-1</cardinality>
-          <commandScript>
-            <script>scripts/application_timeline_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <displayName>ResourceManager</displayName>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <commandScript>
-            <script>scripts/resourcemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REFRESHQUEUES</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-          <configuration-dependencies>
-            <config-type>capacity-scheduler</config-type>
-          </configuration-dependencies>
-        </component>
-
-        <component>
-          <name>NODEMANAGER</name>
-          <displayName>NodeManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/nodemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>YARN_CLIENT</name>
-          <displayName>YARN Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/yarn_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>yarn-site.xml</fileName>
-              <dictionaryName>yarn-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>yarn-env.sh</fileName>
-              <dictionaryName>yarn-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>capacity-scheduler.xml</fileName>
-              <dictionaryName>capacity-scheduler</dictionaryName>
-            </configFile>                        
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-yarn</name>
-            </package>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>yarn-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>yarn-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>MAPREDUCE2</name>
-      <displayName>MapReduce2</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.4.1.phd.3.0.0.0</version>
-      <components>
-        <component>
-          <name>HISTORYSERVER</name>
-          <displayName>History Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>YARN/RESOURCEMANAGER</co-locate>
-          </auto-deploy>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MAPREDUCE2_CLIENT</name>
-          <displayName>MapReduce2 Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/mapreduce2_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>mapred-site.xml</fileName>
-              <dictionaryName>mapred-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>mapred-env.sh</fileName>
-              <dictionaryName>mapred-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/mapred_service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dir>configuration-mapred</configuration-dir>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-env</config-type>
-      </configuration-dependencies>
-    </service>
-
-  </services>
-</metainfo>
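
The cardinality strings in this file ("1", "0-1", "1-2", "0+", "1+") read naturally as (min, max) instance bounds. A small parser sketch; the rules are inferred from the values above, not taken from Ambari's validation code:

def parse_cardinality(card):
  if card.endswith("+"):
    return int(card[:-1]), None            # no upper bound
  if "-" in card:
    lo, hi = card.split("-")
    return int(lo), int(hi)
  return int(card), int(card)              # exact count, e.g. "1"

assert parse_cardinality("0-1") == (0, 1)    # APP_TIMELINE_SERVER
assert parse_cardinality("1-2") == (1, 2)    # RESOURCEMANAGER
assert parse_cardinality("1+") == (1, None)  # NODEMANAGER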


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh
deleted file mode 100644
index 4e21aad..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-os_family=$1
-shift
-
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su -s /bin/bash - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the counter and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-      elif [ "SUCCEEDED" == "$act_status" ]; then
-        rc=0;
-        break;
-      else
-        rc=1
-        break;
-      fi
-    done
-    return $rc
-}
-
-export oozie_conf_dir=$1
-export oozie_bin_dir=$2
-export hadoop_conf_dir=$3
-export hadoop_bin_dir=$4
-export smoke_test_user=$5
-export security_enabled=$6
-export smoke_user_keytab=$7
-export kinit_path_local=$8
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-
-if [ "$os_family" == "ubuntu" ] ; then
-  LIST_PACKAGE_FILES_CMD='dpkg-query -L'
-else
-  LIST_PACKAGE_FILES_CMD='rpm -ql'
-fi
-  
-
-export OOZIE_EXAMPLES_DIR=`$LIST_PACKAGE_FILES_CMD oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-if [[ -z "$OOZIE_EXAMPLES_DIR" ]] ; then
-  export OOZIE_EXAMPLES_DIR='/usr/phd/current/oozie-client/doc/'
-fi
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
-su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
-su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-echo $cmd
-job_info=`su -s /bin/bash - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id" 15
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE
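
getValueFromField above shells out to xmllint, grep, and cut to read a single property from a Hadoop *-site.xml file. The same lookup with Python's standard xml.etree (the example path and property name are illustrative):

import xml.etree.ElementTree as ET

def get_value_from_field(xml_path, prop_name):
  root = ET.parse(xml_path).getroot()
  for prop in root.findall("property"):
    if prop.findtext("name") == prop_name:
      return prop.findtext("value")
  return None

# e.g.: get_value_from_field("/etc/hadoop/conf/yarn-site.xml",
#                            "yarn.resourcemanager.address")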

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/wrap_ooziedb.sh
deleted file mode 100644
index 36576b5..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/wrap_ooziedb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
-EC=$?
-echo $OUT
-GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
-if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
-then
-  exit 0
-else
-  exit $EC
-fi  
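
The wrapper's one trick is exit-code laundering: a failure whose output contains "java.lang.Exception: DB schema exists" is remapped to success, so repeated schema-creation calls stay idempotent. A plain-Python sketch of the same behaviour (paths match the wrapper; "create" stands in for the forwarded arguments):

import subprocess
import sys

proc = subprocess.Popen(["/usr/lib/oozie/bin/ooziedb.sh", "create"],
                        cwd="/var/tmp/oozie",
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = proc.communicate()[0].decode("utf-8", "replace")
print(out)
if proc.returncode != 0 and "java.lang.Exception: DB schema exists" in out:
  sys.exit(0)                  # schema already there: treat as success
sys.exit(proc.returncode)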

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie.py
deleted file mode 100644
index befea51..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-
-def oozie(is_server=False):  # TODO: see if we can remove this parameter
-  import params
-
-  if is_server:
-    params.HdfsDirectory(params.oozie_hdfs_user_dir,
-                         action="create",
-                         owner=params.oozie_user,
-                         mode=params.oozie_hdfs_user_mode
-    )
-  Directory( params.conf_dir,
-             recursive = True,
-             owner = params.oozie_user,
-             group = params.user_group
-  )
-  XmlConfig( "oozie-site.xml",
-    conf_dir = params.conf_dir,
-    configurations = params.config['configurations']['oozie-site'],
-    configuration_attributes=params.config['configuration_attributes']['oozie-site'],
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0664
-  )
-  File(format("{conf_dir}/oozie-env.sh"),
-    owner=params.oozie_user,
-    content=InlineTemplate(params.oozie_env_sh_template)
-  )
-
-  if params.security_enabled:
-    tomcat_conf_dir = format("{tomcat_conf_secure}")
-  else:
-    tomcat_conf_dir = format("{tomcat_conf}")
-
-  File(format("{tomcat_conf_dir}/catalina.properties"),
-    content = Template("catalina.properties.j2"),
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0755
-  )
-
-  if (params.log4j_props != None):
-    File(format("{params.conf_dir}/oozie-log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.oozie_user,
-      content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
-    File(format("{params.conf_dir}/oozie-log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.oozie_user
-    )
-
-  environment = {
-    "no_proxy": format("{ambari_server_hostname}")
-  }
-
-  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
-     params.jdbc_driver_name == "org.postgresql.Driver" or \
-     params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
-    curl -kf -x \"\" \
-    --retry 5 {jdk_location}{check_db_connection_jar_name}\
-     -o {check_db_connection_jar_name}'"),
-      not_if  = format("[ -f {check_db_connection_jar} ]"),
-      environment=environment
-    )
-    
-  oozie_ownership( )
-  
-  if is_server:      
-    oozie_server_specific( )
-  
-def oozie_ownership():
-  import params
-  
-  File ( format("{conf_dir}/adminusers.txt"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/hadoop-config.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/oozie-default.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  Directory ( format("{conf_dir}/action-conf"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/action-conf/hive.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-def oozie_server_specific():
-  import params
-  
-  File(params.pid_file,
-    action="delete",
-    not_if="ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)"
-  )
-  
-  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
-  Directory( oozie_server_directories,
-    owner = params.oozie_user,
-    mode = 0755,
-    recursive = True
-  )
-
-  cmd1 = "sh"
-
-  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
-    cmd1 += format(" && cp {jdbc_driver_jar} {oozie_lib_dir}")
-
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  Execute( [cmd1],
-    not_if  = no_op_test
-  )
-  
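
Most of oozie.py is declarative: each File/Directory/Execute resource states a desired end state, and guards like not_if keep re-runs cheap. The guarded jar download, for instance, reduces to "skip when the file already exists". A standalone stdlib sketch of that guard (the destination directory default mirrors the recipe; the helper name is made up):

    import os
    import subprocess

    def fetch_db_check_jar(jdk_location, jar_name,
                           dest_dir="/usr/lib/ambari-agent"):
        dest = os.path.join(dest_dir, jar_name)
        if os.path.isfile(dest):  # the not_if guard: nothing to do on re-run
            return dest
        # -x "" disables any proxy; --retry 5 matches the recipe above
        subprocess.check_call(["curl", "-kf", "-x", "", "--retry", "5",
                               jdk_location + jar_name, "-o", dest],
                              cwd=dest_dir)
        return dest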

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_client.py
deleted file mode 100644
index f77a8db..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=False)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-if __name__ == "__main__":
-  OozieClient().execute()
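
Every component script follows the same contract: subclass Script, implement the lifecycle methods, and let execute() dispatch the command name the agent passes in. A toy stand-in for that dispatch, not the real resource_management implementation:

    import sys

    class MiniScript(object):
        def execute(self):
            # The agent invokes the script with a command name,
            # e.g. `oozie_client.py configure`.
            command = sys.argv[1] if len(sys.argv) > 1 else "status"
            getattr(self, command)()

    class OozieClientSketch(MiniScript):
        def install(self):
            print("install packages, then write configs")
            self.configure()

        def configure(self):
            print("render /etc/oozie/conf")

        def status(self):
            raise SystemExit("client components report no status")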

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_server.py
deleted file mode 100644
index 70414fc..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_server.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=True)
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    #TODO remove this when config command will be implemented
-    self.configure(env)
-    oozie_service(action='start')
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    oozie_service(action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-    
-if __name__ == "__main__":
-  OozieServer().execute()
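
status() delegates to check_process_status, which boils down to reading the pid file and probing the process. A stdlib sketch of that probe (signal 0 tests for existence without sending anything):

    import os

    def process_is_running(pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # raises OSError when no such process exists
            return True
        except (IOError, OSError, ValueError):
            return False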

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_service.py
deleted file mode 100644
index 03608ae..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/oozie_service.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management import *
-
-def oozie_service(action = 'start'): # 'start' or 'stop'
-  import params
-
-  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    start_cmd = "service oozie start"
-    
-    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
-       params.jdbc_driver_name == "org.postgresql.Driver" or \
-       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
-    else:
-      db_connection_check_command = None
-      
-    cmd1 =  "service oozie init"
-    cmd2 =  format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
-
-    if not os.path.isfile(params.jdbc_driver_jar) and params.jdbc_driver_name == "org.postgresql.Driver":
-      print format("ERROR: jdbc file {jdbc_driver_jar} is unavailable. Please, follow next steps:\n" \
-        "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
-        "3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
-        "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
-        "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
-      exit(1)
-
-    if db_connection_check_command:
-      Execute( db_connection_check_command, tries=5, try_sleep=10)
-                  
-    Execute( cmd1,
-      not_if  = no_op_test,
-      ignore_failures = True
-    ) 
-    
-    Execute( cmd2,
-      user = params.oozie_user,
-      not_if = format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
-      path = params.execute_path
-    )
-    
-    Execute( start_cmd,
-      not_if  = no_op_test,
-    )
-  elif action == 'stop':
-    stop_cmd  = "service oozie stop"
-    Execute( stop_cmd,
-      only_if  = no_op_test
-    )
-
-  
-  
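
The DB connection check is the only step that gets retries (tries=5, try_sleep=10); everything else is guarded by the pid-file no_op_test so a running server is never touched. A plain-Python version of that retry wrapper, for reference (the Execute resource above does this internally):

    import subprocess
    import time

    def execute_with_retries(cmd, tries=5, try_sleep=10):
        for attempt in range(1, tries + 1):
            if subprocess.call(cmd, shell=True) == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError("failed after %d tries: %s" % (tries, cmd))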

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/params.py
deleted file mode 100644
index b88aa6e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/params.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-import fnmatch
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  hadoop_bin_dir = "/usr/phd/current/hadoop-client/bin"
-  hadoop_lib_home = "/usr/phd/current/hadoop-client/lib"
-  hive_lib_dir = "/usr/phd/current/hive-client/lib"
-  oozie_lib_dir = "/usr/phd/current/oozie-client/"
-  oozie_setup_sh = "/usr/phd/current/oozie-client/bin/oozie-setup.sh"
-  oozie_webapps_dir = "/usr/phd/current/oozie-client/tomcat-deployment/webapps"
-  oozie_webapps_conf_dir = "/usr/phd/current/oozie-client/tomcat-deployment/conf"
-  oozie_libext_dir = "/usr/phd/current/oozie-client/libext"
-  oozie_server_dir = "/usr/phd/current/oozie-client/tomcat-deployment"
-  oozie_shared_lib = "/usr/phd/current/oozie-client/oozie-sharelib.tar.gz"
-  oozie_home = "/usr/phd/current/oozie-client"
-  oozie_bin_dir = "/usr/phd/current/oozie-client/bin"
-  falcon_home = '/usr/phd/current/falcon-client'
-  tomcat_conf = "/etc/oozie/tomcat-conf.http/conf"
-  tomcat_conf_secure = "/etc/oozie/tomcat-conf.https/conf"
-
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_lib_home = "/usr/lib/hadoop/lib"
-  hive_lib_dir = "/usr/lib/hive/lib"
-  oozie_lib_dir = "/var/lib/oozie/"
-  oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
-  oozie_webapps_dir = "/var/lib/oozie/tomcat-deployment/webapps/"
-  oozie_webapps_conf_dir = "/var/lib/oozie/tomcat-deployment/conf"
-  oozie_libext_dir = "/usr/lib/oozie/libext"
-  oozie_server_dir = "/var/lib/oozie/tomcat-deployment"
-  oozie_shared_lib = "/usr/lib/oozie/oozie-sharelib.tar.gz"
-  oozie_home = "/usr/lib/oozie"
-  oozie_bin_dir = "/usr/bin"
-  falcon_home = '/usr/lib/falcon'
-  tomcat_conf = "/etc/oozie/tomcat-conf.http/conf"
-  tomcat_conf_secure = "/etc/oozie/tomcat-conf.https/conf"
-
-execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-conf_dir = "/etc/oozie/conf"
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-user_group = config['configurations']['cluster-env']['user_group']
-jdk_location = config['hostLevelParams']['jdk_location']
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-oozie_tmp_dir = "/var/tmp/oozie"
-oozie_hdfs_user_dir = format("/user/{oozie_user}")
-oozie_pid_dir = status_params.oozie_pid_dir
-pid_file = status_params.pid_file
-hadoop_jar_location = "/usr/lib/hadoop/"
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-hive_jar_files = ""
-
-if not os.path.exists(hive_lib_dir):
-    raise Fail("Could not find Hive library directory: %s" % (hive_lib_dir))
-
-for entry in os.listdir(hive_lib_dir):
-    absolute_path = os.path.join(hive_lib_dir, entry)
-    if os.path.isfile(absolute_path) and not os.path.islink(absolute_path):
-        if fnmatch.fnmatchcase(entry, "hive-*.jar"):
-            if (len(hive_jar_files) == 0):
-                hive_jar_files = absolute_path
-            else:
-                hive_jar_files = hive_jar_files + "," + absolute_path
-
-catalina_properties_common_loader = "/usr/lib/hive-hcatalog/share/hcatalog/*.jar,/usr/lib/hive-hcatalog/share/webhcat/java-client/*.jar"
-
-if (len(hive_jar_files) != 0):
-    catalina_properties_common_loader = hive_jar_files + "," + catalina_properties_common_loader
-
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
-oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
-oozie_env_sh_template = config['configurations']['oozie-env']['content']
-
-oracle_driver_jar_name = "ojdbc6.jar"
-
-java_home = config['hostLevelParams']['java_home']
-oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
-oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
-oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
-oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
-oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
-oozie_env_sh_template = config['configurations']['oozie-env']['content']
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-
-put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
-  
-jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-if jdbc_driver_name == "com.mysql.jdbc.Driver":
-  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-elif jdbc_driver_name == "org.postgresql.Driver":
-  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")
-elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-else:
-  jdbc_driver_jar = ""
-
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-has_falcon_host = len(falcon_host) > 0
-
-#oozie-log4j.properties
-if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
-  log4j_props = config['configurations']['oozie-log4j']['content']
-else:
-  log4j_props = None
-
-oozie_hdfs_user_mode = 0775
-#for create_hdfs_directory
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
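
The closing functools.partial is the one non-obvious trick in params.py: the cluster-wide HDFS arguments are baked in once, so call sites only pass what varies. The same idea on a plain function, as a sketch with placeholder values:

    import functools

    def hdfs_directory(path, action, owner, mode,
                       conf_dir=None, hdfs_user=None):
        print("%s %s (owner=%s, mode=%o) using %s as %s"
              % (action, path, owner, mode, conf_dir, hdfs_user))

    # Common arguments fixed once; call sites stay short.
    HdfsDirectory = functools.partial(hdfs_directory,
                                      conf_dir="/etc/hadoop/conf",
                                      hdfs_user="hdfs")
    HdfsDirectory("/user/oozie", action="create", owner="oozie", mode=0o775)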

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/service_check.py
deleted file mode 100644
index 231ee34..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/service_check.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class OozieServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    # on PHD1 this file is different
-    smoke_test_file_name = 'oozieSmoke2.sh'
-
-    oozie_smoke_shell_file( smoke_test_file_name)
-  
-def oozie_smoke_shell_file(file_name):
-  import params
-
-  File( format("{tmp_dir}/{file_name}"),
-    content = StaticFile(file_name),
-    mode = 0755
-  )
-  
-  os_family = System.get_instance().os_family
-  
-  if params.security_enabled:
-    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
-  else:
-    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")
-
-  Execute( format("{tmp_dir}/{file_name}"),
-    command   = sh_cmd,
-    path      = params.execute_path,
-    tries     = 3,
-    try_sleep = 5,
-    logoutput = True
-  )
-    
-if __name__ == "__main__":
-  OozieServiceCheck().execute()
-  
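
The only branching in the service check is the argument list: kerberized clusters append two trailing parameters, the keytab and the kinit path. Assembling that argv conditionally in plain Python, as a sketch over a hypothetical params dict:

    def build_smoke_cmd(p):
        cmd = [p["script"], p["os_family"], p["conf_dir"], p["oozie_bin_dir"],
               p["hadoop_conf_dir"], p["hadoop_bin_dir"], p["smokeuser"],
               str(p["security_enabled"])]
        if p["security_enabled"]:
            cmd += [p["smokeuser_keytab"], p["kinit_path_local"]]
        return cmd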

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/status_params.py
deleted file mode 100644
index a665449..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
-pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/catalina.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/catalina.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/catalina.properties.j2
deleted file mode 100644
index 96fa996..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/catalina.properties.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# List of comma-separated packages that start with or equal this string
-# will cause a security exception to be thrown when
-# passed to checkPackageAccess unless the
-# corresponding RuntimePermission ("accessClassInPackage."+package) has
-# been granted.
-package.access=sun.,org.apache.catalina.,org.apache.coyote.,org.apache.tomcat.,org.apache.jasper.,sun.beans.
-#
-# List of comma-separated packages that start with or equal this string
-# will cause a security exception to be thrown when
-# passed to checkPackageDefinition unless the
-# corresponding RuntimePermission ("defineClassInPackage."+package) has
-# been granted.
-#
-# by default, no packages are restricted for definition, and none of
-# the class loaders supplied with the JDK call checkPackageDefinition.
-#
-package.definition=sun.,java.,org.apache.catalina.,org.apache.coyote.,org.apache.tomcat.,org.apache.jasper.
-
-#
-#
-# List of comma-separated paths defining the contents of the "common"
-# classloader. Prefixes should be used to define what is the repository type.
-# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
-# If left as blank,the JVM system loader will be used as Catalina's "common"
-# loader.
-# Examples:
-#     "foo": Add this folder as a class repository
-#     "foo/*.jar": Add all the JARs of the specified folder as class
-#                  repositories
-#     "foo/bar.jar": Add bar.jar as a class repository
-common.loader=/var/lib/oozie/*.jar,/usr/lib/hadoop/client/*.jar,{{catalina_properties_common_loader}},/usr/lib/oozie/libserver/*.jar,${catalina.home}/lib,${catalina.home}/lib/*.jar
-
-#
-# List of comma-separated paths defining the contents of the "server"
-# classloader. Prefixes should be used to define what is the repository type.
-# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
-# If left as blank, the "common" loader will be used as Catalina's "server"
-# loader.
-# Examples:
-#     "foo": Add this folder as a class repository
-#     "foo/*.jar": Add all the JARs of the specified folder as class
-#                  repositories
-#     "foo/bar.jar": Add bar.jar as a class repository
-server.loader=
-
-#
-# List of comma-separated paths defining the contents of the "shared"
-# classloader. Prefixes should be used to define what is the repository type.
-# Path may be relative to the CATALINA_BASE path or absolute. If left as blank,
-# the "common" loader will be used as Catalina's "shared" loader.
-# Examples:
-#     "foo": Add this folder as a class repository
-#     "foo/*.jar": Add all the JARs of the specified folder as class
-#                  repositories
-#     "foo/bar.jar": Add bar.jar as a class repository
-# Please note that for single jars, e.g. bar.jar, you need the URL form
-# starting with file:.
-shared.loader=
-
-#
-# String cache configuration.
-tomcat.util.buf.StringCache.byte.enabled=true
-#tomcat.util.buf.StringCache.char.enabled=true
-#tomcat.util.buf.StringCache.trainThreshold=500000
-#tomcat.util.buf.StringCache.cacheSize=5000
\ No newline at end of file
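
The only dynamic piece of this template is {{catalina_properties_common_loader}}, which params.py computes from the Hive client jars. Rendering it outside Ambari takes nothing more than Jinja2 (the value below is illustrative); note that Jinja leaves the Tomcat-style ${catalina.home} references untouched:

    from jinja2 import Template

    line = Template(
        "common.loader=/var/lib/oozie/*.jar,"
        "{{catalina_properties_common_loader}},"
        "${catalina.home}/lib/*.jar")
    print(line.render(
        catalina_properties_common_loader=
            "/usr/lib/hive-hcatalog/share/hcatalog/*.jar"))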

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
deleted file mode 100644
index 8c9f25e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
+++ /dev/null
@@ -1,92 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-env.xml
deleted file mode 100644
index aded45f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-env.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- pig-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for pig-env.sh file</description>
-    <value>
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-if [ -d "/usr/lib/tez" ]; then
-  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"
-fi
-    </value>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-log4j.xml
deleted file mode 100644
index 4fe323c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-log4j.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set the org.apache.pig logger level to INFO and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-properties.xml
deleted file mode 100644
index cee3211..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/configuration/pig-properties.xml
+++ /dev/null
@@ -1,92 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Describe all the Pig agent configurations</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-# debug level, INFO is default
-debug=INFO
-
-# verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-# exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-# Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-# Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-
-# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-# This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-# the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-# Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-# Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
-
-# Avoid pig failures when multiple jobs write to the same location
-pig.location.check.strict=false
-
-hcat.bin=/usr/bin/hcat
-
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/metainfo.xml
deleted file mode 100644
index 1b5f681..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <displayName>Pig</displayName>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.12.1.phd.3.0.0.0</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <displayName>Pig</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>pig-env.sh</fileName>
-              <dictionaryName>pig-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>pig-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>pig.properties</fileName>
-              <dictionaryName>pig-properties</dictionaryName>
-            </configFile>                         
-          </configFiles>          
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>pig-env</config-type>
-        <config-type>pig-log4j</config-type>
-        <config-type>pig-properties</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/params.py
deleted file mode 100644
index ac1104d..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/params.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  hadoop_bin_dir = "/usr/phd/current/hadoop-client/bin"
-  hadoop_home = '/usr/phd/current/hadoop-client'
-  pig_bin_dir = '/usr/phd/current/pig-client/bin'
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = '/usr'
-  pig_bin_dir = ""
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-pig_conf_dir = "/etc/pig/conf"
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-user_group = config['configurations']['cluster-env']['user_group']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-pig_env_sh_template = config['configurations']['pig-env']['content']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-pig_properties = config['configurations']['pig-properties']['content']
-
-log4j_props = config['configurations']['pig-log4j']['content']

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig.py
deleted file mode 100644
index afdba8d..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-
-def pig():
-  import params
-
-  Directory( params.pig_conf_dir,
-    recursive = True,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  File(format("{pig_conf_dir}/pig-env.sh"),
-    owner=params.hdfs_user,
-    content=InlineTemplate(params.pig_env_sh_template)
-  )
-
-  # pig_properties is always set to a default even if it's not in the payload
-  File(format("{params.pig_conf_dir}/pig.properties"),
-              mode=0644,
-              group=params.user_group,
-              owner=params.hdfs_user,
-              content=params.pig_properties
-  )
-
-  if (params.log4j_props != None):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user,
-      content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig_client.py
deleted file mode 100644
index 931dceb..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/pig_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from pig import pig
-
-
-class PigClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    pig()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/service_check.py
deleted file mode 100644
index 7619bd6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/PIG/package/scripts/service_check.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class PigServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    input_file = 'passwd'
-    output_file = "pigsmoke.out"
-
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    #cleanup is put below to handle retries; if retrying, there will be a stale file that needs cleanup; exit code is a function of the second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop --config {hadoop_conf_dir} dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
-    test_cmd = format("fs -test -e {output_file}")
-
-    ExecuteHadoop( create_file_cmd,
-      tries     = 3,
-      try_sleep = 5,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir,
-      # for kinit run
-      keytab = params.smoke_user_keytab,
-      security_enabled = params.security_enabled,
-      kinit_path_local = params.kinit_path_local,
-      bin_dir = params.hadoop_bin_dir
-    )
-
-    File( format("{tmp_dir}/pigSmoke.sh"),
-      content = StaticFile("pigSmoke.sh"),
-      mode = 0755
-    )
-
-    Execute( format("pig {tmp_dir}/pigSmoke.sh"),
-      tries     = 3,
-      try_sleep = 5,
-      path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
-      user      = params.smokeuser
-    )
-
-    ExecuteHadoop( test_cmd,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir,
-      bin_dir = params.hadoop_bin_dir
-    )
-
-if __name__ == "__main__":
-  PigServiceCheck().execute()
-
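
For reference, the smoke test above reduces to three steps: stage an input file in HDFS, run the Pig script, and verify the output exists. A minimal standalone sketch of the same flow (paths, conf dir, and the pigSmoke.sh location are illustrative; the modern "fs -rm -r" spelling stands in for the deprecated "dfs -rmr"):

    import subprocess

    def pig_smoke(tmp_dir="/tmp", conf_dir="/etc/hadoop/conf"):
        # 1. Clean up leftovers from a previous retry, then stage the input.
        subprocess.call(["hadoop", "--config", conf_dir, "fs", "-rm", "-r",
                         "pigsmoke.out", "passwd"])
        subprocess.check_call(["hadoop", "--config", conf_dir, "fs", "-put",
                               "/etc/passwd", "passwd"])
        # 2. Run the smoke script (reads 'passwd', writes 'pigsmoke.out').
        subprocess.check_call(["pig", "%s/pigSmoke.sh" % tmp_dir])
        # 3. Fail the check unless the output landed in HDFS.
        subprocess.check_call(["hadoop", "--config", conf_dir, "fs", "-test",
                               "-e", "pigsmoke.out"])

    if __name__ == "__main__":
        pig_smoke()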

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-env.xml
deleted file mode 100644
index 14ae20b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-env.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <property-type>USER</property-type>
-    <description>Mapreduce User</description>
-  </property>
-  <property>
-    <name>jobhistory_heapsize</name>
-    <value>900</value>
-    <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
-  </property>
-  
-  <!-- mapred-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for the mapred-env.sh file</description>
-    <value>
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
-
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-    </value>
-  </property>
-</configuration>
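
The content property above is a Jinja template; at deploy time it is rendered with the other mapred-env values and written out as mapred-env.sh. A minimal sketch of that substitution using the jinja2 package directly (Ambari's resource_management templating does the equivalent; the config dict here is illustrative):

    from jinja2 import Template

    template = Template(
        "export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n"
        "export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n")
    config = {"jobhistory_heapsize": 900}

    # Produces the mapred-env.sh fragment with values from the cluster config.
    print(template.render(**config))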

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-site.xml
deleted file mode 100644
index 7955cb2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration-mapred/mapred-site.xml
+++ /dev/null
@@ -1,360 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-  </property>
-
-<!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory, relative to the maximum heap size, to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-    <description>
-      Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx312m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for map tasks.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for reduce tasks.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting. It should not be larger than the global number
-      set by the resourcemanager; otherwise, it will be overridden. The default is
-      set to 2, to allow at least one retry for the AM.
-    </description>
-  </property>
-
-
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-  </property>
-
-</configuration>
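
Note how the container sizes and child JVM heaps above are paired: mapreduce.map.memory.mb is 1024 while mapreduce.map.java.opts is -Xmx756m, leaving headroom for non-heap memory so the NodeManager does not kill the container for exceeding its limit. A small sketch of that sizing heuristic (the 0.75 factor is a common rule of thumb, not a value taken from this stack definition):

    def java_opts_for_container(container_mb, heap_fraction=0.75):
        """Derive a child-JVM -Xmx from a YARN container size, leaving
        headroom for threads, native buffers, and other off-heap usage."""
        return "-Xmx%dm" % int(container_mb * heap_fraction)

    # A 1024 MB container yields "-Xmx768m", close to the -Xmx756m above.
    print(java_opts_for_container(1024))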

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index 6c21848..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,132 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration supports_final="false" supports_adding_forbidden="true">
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters, i.e. it controls the number of concurrently running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues' capacities should add up to at most their parent
-      queue's capacity.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit, a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to the number of nodes in the cluster. By default
-      it is set to approximately the number of nodes in one rack, which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>
-      Default minimum queue resource limit depends on the number of users who have submitted applications.
-    </description>
-  </property>
-
-
-</configuration>
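
A quick sanity check for a capacity-scheduler layout like the one above is that the capacities of sibling queues sum to 100 under their parent. A minimal sketch (the flat property dict mirrors the XML; real hierarchies can nest deeper):

    def check_queue_capacities(props, parent="root"):
        prefix = "yarn.scheduler.capacity.%s." % parent
        queues = [q for q in props.get(prefix + "queues", "").split(",") if q]
        total = sum(float(props[prefix + q + ".capacity"]) for q in queues)
        if total != 100:
            raise ValueError("capacities under %s sum to %s, expected 100"
                             % (parent, total))

    props = {
        "yarn.scheduler.capacity.root.queues": "default",
        "yarn.scheduler.capacity.root.default.capacity": "100",
    }
    check_queue_capacities(props)  # passes; a 60/30 split would raise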


[08/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Function for kinit. Checks whether security is enabled and whether klist shows a ticket
- * for this principal; makes a kinit call if none is found.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks if the user is logged in to Kerberos
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file
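
The kinit_if_needed/is_logined pair above has a straightforward Python counterpart that avoids the shell pipeline. A hedged sketch (the klist lookup is simplified to a substring match; the kinit path, keytab, and principal come from the caller):

    import subprocess

    def kinit_if_needed(security_enabled, kinit_path, keytab_path, principal):
        """Run kinit only when security is on and no cached ticket exists."""
        if not security_enabled:
            return 0, ''
        # klist prints the cached principals; a hit means no kinit is needed.
        p = subprocess.Popen(['klist'], stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, _ = p.communicate()
        if principal in out:
            return 0, ''
        p = subprocess.Popen([kinit_path, '-kt', keytab_path, principal],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = p.communicate()
        return p.returncode, out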

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py
deleted file mode 100644
index 7a622b6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py
+++ /dev/null
@@ -1,326 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import sys
-import subprocess
-import os
-
-N_SGN = 'NAGIOS_SERVICEGROUPNAME'
-N_SD = 'NAGIOS_SERVICEDESC'
-N_HOST = 'NAGIOS_HOSTNAME'
-
-LIST_SEPARATOR = "--"
-HOSTNAME_PLACEHOLDER = "^^"
-IGNORE_DAT_FILE = "/var/nagios/ignore.dat"
-
-# Mode constants
-OR = 0
-AND = 1
-ENV_ONLY = 2
-FILTER_MM = 3
-LEGACY_CHECK_WRAPPER = 4
-MODES = ['or', 'and', 'env_only', 'filter_mm', 'legacy_check_wrapper']
-
-
-def ignored_host_list(service, component):
-  """
-  :param service: current service
-  :param component: current component
-  :return: all hosts where specified host component is in ignored state
-  """
-  try:
-    with open(IGNORE_DAT_FILE) as f:
-      lines = f.readlines()
-  except IOError:
-    return []
-  result = []
-  if lines:
-    for l in lines:
-      tokens = l.split(' ')
-      if len(tokens) == 3 and tokens[1] == service and tokens[2].strip() == component:
-        result.append(tokens[0])
-  return result
-
-
-def get_real_service():
-  try:
-    service = os.environ[N_SGN]  # e.g. 'HBASE'
-  except KeyError:
-    service = ''
-  return service
-
-
-def get_real_component():
-  try:
-    arr_desc = os.environ[N_SD]  # e.g. 'HBASE::Percent RegionServers live'
-    SEPARATOR = "::"
-    comp_name = arr_desc.replace(SEPARATOR, ' ').split(' ')[0]
-  except KeyError:
-    comp_name = ''
-  mapping = {
-    'HBASEMASTER': 'HBASE_MASTER',
-    'REGIONSERVER': 'HBASE_REGIONSERVER',
-    'JOBHISTORY': 'MAPREDUCE2',
-    'HIVE-METASTORE': 'HIVE_METASTORE',
-    'HIVE-SERVER': 'HIVE_SERVER',
-    'FLUME': 'FLUME_HANDLER',
-    'HUE': 'HUE_SERVER',
-    'WEBHCAT': 'WEBHCAT_SERVER',
-  }
-  if comp_name in mapping:
-    comp_name = mapping.get(comp_name)
-  return comp_name
-
-
-def check_output(*popenargs, **kwargs):
-  """
-  Imitate subprocess.check_output() for python 2.6
-  """
-  process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                             *popenargs, **kwargs)
-  output, unused_err = process.communicate()
-  retcode = process.poll()
-  if retcode:
-    cmd = kwargs.get("args")
-    if cmd is None:
-      cmd = popenargs[0]
-    err = subprocess.CalledProcessError(retcode, cmd)
-    # Monkey-patching for python 2.6
-    err.output = output
-    raise err
-  return output
-
-
-def print_usage():
-  """
-  Prints usage and exits with a non-zero exit code
-  """
-  print "Usage: mm_wrapper.py MODE HOST1 HOST2 .. HOSTN %s command arg1 arg2 .. argN" % LIST_SEPARATOR
-  print "MODE is one of the following: or, and, env_only, filter_mm, legacy_check_wrapper"
-  print "%s is a separator between list of hostnames and command with args" % LIST_SEPARATOR
-  print "%s is used as a hostname placeholder at command args" % HOSTNAME_PLACEHOLDER
-  print "Also script provides $MM_HOSTS shell variable to commands"
-  print "NOTE: Script makes use of Nagios-populated env vars %s and %s" % (N_SGN, N_SD)
-  print "For more info, please see docstrings at %s" % os.path.realpath(__file__)
-  sys.exit(1)
-
-
-def parse_args(args):
-  if not args or not LIST_SEPARATOR in args or args[0] not in MODES:
-    print_usage()
-  else:
-    mode = MODES.index(args[0])  # identify operation mode
-    args = args[1:]  # Shift args left
-    hostnames = []
-    command_line = []
-    # Parse command line args
-    passed_separator = False  # True if met LIST_SEPARATOR
-    for arg in args:
-      if not passed_separator:
-        if arg != LIST_SEPARATOR:
-          hostnames.append(arg)
-        else:
-          passed_separator = True
-      else:
-        if arg != LIST_SEPARATOR:
-          command_line.append(arg)
-        else:  # Something definitely goes wrong
-          print "Could not parse arguments: " \
-                "There is more than one %s argument." % LIST_SEPARATOR
-          print_usage()
-
-    if not command_line:
-      print "No command provided."
-      print_usage()
-    return mode, hostnames, command_line
-
-
-def do_work(mode, hostnames, command_line):
-  # Execute commands
-  ignored_hosts = ignored_host_list(get_real_service(), get_real_component())
-  empty_check_result = {
-    'message': 'No checks have been run (no hostnames provided)',
-    'retcode': -1,
-    'real_retcode': None
-  }
-  custom_env = os.environ.copy()
-  if ignored_hosts:
-    custom_env['MM_HOSTS'] = \
-      reduce(lambda a, b: "%s %s" % (a, b), ignored_hosts)
-  if mode == OR:
-    check_result = work_in_or_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  elif mode == AND:
-    check_result = work_in_and_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  elif mode == ENV_ONLY:
-    check_result = work_in_env_only_mode(hostnames, command_line, custom_env)
-  elif mode == FILTER_MM:
-    check_result = work_in_filter_mm_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result)
-  else:  # mode == LEGACY_CHECK_WRAPPER:
-    check_result = work_in_legacy_check_wrapper_mode(ignored_hosts, command_line, custom_env)
-  # Build the final output
-  final_output = []
-  output = check_result.get('message')
-  if output is not None:
-    for string in output.splitlines():
-      final_output.append(string.strip())
-  real_retcode = check_result.get('real_retcode')
-  if real_retcode:
-    # This string is used at check_aggregate.php when aggregating alerts
-    final_output.append("AMBARIPASSIVE=%s" % real_retcode)
-  return final_output, check_result.get('retcode')
-
-
-def work_in_or_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  check_result = empty_check_result
-  for hostname in hostnames:
-    concrete_command_line = map(  # Substitute hostname where needed
-                                  lambda x: hostname if x == HOSTNAME_PLACEHOLDER else x,
-                                  command_line)
-    try:
-      returncode = 0
-      real_retcode = None
-      message = check_output(concrete_command_line, env=custom_env)
-    except subprocess.CalledProcessError, e:
-      if hostname not in ignored_hosts:
-        returncode = e.returncode
-      else:  # Host is in MM
-        real_retcode = e.returncode
-      message = e.output
-    really_positive_result = hostname not in ignored_hosts and returncode == 0
-    if check_result.get('retcode') <= returncode or really_positive_result:
-      check_result = {
-        'message': message,
-        'retcode': returncode,
-        'real_retcode': real_retcode  # Real (not suppressed) program retcode
-      }
-    if really_positive_result:
-      break  # Exit on first real success
-  return check_result
-
-
-def work_in_and_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  check_result = empty_check_result
-  for hostname in hostnames:
-    concrete_command_line = map(  # Substitute hostname where needed
-                                  lambda x: hostname if x == HOSTNAME_PLACEHOLDER else x,
-                                  command_line)
-    try:
-      returncode = 0
-      real_retcode = None
-      message = check_output(concrete_command_line, env=custom_env)
-    except subprocess.CalledProcessError, e:
-      if hostname not in ignored_hosts:
-        returncode = e.returncode
-      else:
-        real_retcode = e.returncode
-      message = e.output
-    if check_result.get('retcode') <= returncode:
-      check_result = {
-        'message': message,
-        'retcode': returncode,
-        'real_retcode': real_retcode  # Real (not suppressed) program retcode
-      }
-  return check_result
-
-
-def work_in_env_only_mode(hostnames, command_line, custom_env):
-  concrete_command_line = []
-  for item in command_line:
-    if item == HOSTNAME_PLACEHOLDER:
-      concrete_command_line.extend(hostnames)
-    else:
-      concrete_command_line.append(item)
-  try:
-    returncode = 0
-    message = check_output(concrete_command_line, env=custom_env)
-  except subprocess.CalledProcessError, e:
-    returncode = e.returncode
-    message = e.output
-  check_result = {
-    'message': message,
-    'retcode': returncode,
-    'real_retcode': None  # Real (not suppressed) program retcode
-  }
-  return check_result
-
-
-def work_in_filter_mm_mode(hostnames, ignored_hosts, command_line, custom_env, empty_check_result):
-  not_mm_hosts = [hostname for hostname in hostnames if hostname not in ignored_hosts]
-  if not not_mm_hosts:  # All hosts have been filtered
-    return empty_check_result
-  else:
-    return work_in_env_only_mode(not_mm_hosts, command_line, custom_env)
-
-
-def work_in_legacy_check_wrapper_mode(ignored_hosts, command_line, custom_env):
-  host = os.environ[N_HOST]
-  result = work_in_env_only_mode([host], command_line, custom_env)
-  real_retcode = result['retcode']
-  if host in ignored_hosts and real_retcode != 0:  # Ignore fail
-    result['retcode'] = 0
-    result['real_retcode'] = real_retcode
-  return result
-
-
-def main():
-  """
-  This script allows running Nagios service check commands for host components
-  located on different hosts.
-  The script also passes every command a $MM_HOSTS shell variable with the list
-  of hosts that are in MM (maintenance mode).
-
-  or mode: return a 0 exit code if at least one service check succeeds.
-  The command exits on the first success.
-  Failures for host components that are in MM are suppressed (the return code
-  is set to 0).
-  If the command fails for all provided hostnames, the script returns the alert
-  with the greatest exit code value.
-
-  and mode:
-  Perform checks of all host components (effectively ignoring negative results
-  for MM components). If the service check is successful for all hosts, the
-  script returns a zero exit code. Otherwise the alert with the greatest exit
-  code is returned.
-
-  env_only mode:
-  Pass the list of all hosts to the command and run it once. The only role of
-  the mm_wrapper script in this mode is to provide a properly initialized
-  $MM_HOSTS env variable to the command being run. All duties of ignoring
-  failures of MM host components are delegated to the command being run.
-
-  filter_mm mode:
-  Similar to env_only mode. The only difference is that hostnames for host
-  components that are in MM are filtered out (not passed to the command at all).
-
-  legacy_check_wrapper mode:
-  Designed as a drop-in replacement for check_wrapper.sh. It reads the
-  $NAGIOS_HOSTNAME env var and ignores check results if the host component on
-  this host is in MM. When the host substitution symbol is encountered, the
-  hostname defined by $NAGIOS_HOSTNAME is substituted.
-  """
-  args = sys.argv[1:]  # Shift args left
-  mode, hostnames, command_line = parse_args(args)
-  output, ret_code = do_work(mode, hostnames, command_line)
-  for line in output:
-    print line
-  sys.exit(ret_code)
-
-
-if __name__ == "__main__":
-  main()
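
For orientation, a typical invocation substitutes each hostname into the wrapped plugin command at the ^^ placeholder, with -- separating the host list from the command. An illustrative call and what parse_args() makes of it (the plugin name and hosts are made up; this assumes mm_wrapper.py is importable as a module):

    # Equivalent command line:
    #   mm_wrapper.py or host1 host2 -- check_tcp -H ^^ -p 50070
    from mm_wrapper import parse_args

    mode, hosts, cmd = parse_args(
        ["or", "host1", "host2", "--", "check_tcp", "-H", "^^", "-p", "50070"])
    assert mode == 0                      # OR mode
    assert hosts == ["host1", "host2"]
    assert cmd == ["check_tcp", "-H", "^^", "-p", "50070"]

work_in_or_mode() then runs the command once per host with ^^ replaced by the hostname, stopping on the first real success.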

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/nagios_alerts.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/nagios_alerts.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/nagios_alerts.php
deleted file mode 100644
index 0e1e501..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/nagios_alerts.php
+++ /dev/null
@@ -1,513 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Constants. */
-define("HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES", "Properties");
-define("HDP_MON_RESPONSE_OPTION_KEY__TYPE", "Type");
-
-define("HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE", "Uncacheable");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON", "JSON");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT", "JAVASCRIPT");
-
-define("HDP_MON_QUERY_ARG__JSONP", "jsonp");
-
-/** Spits out appropriate response headers, as per the options passed in. */
-function hdp_mon_generate_response_headers( $response_options )
-{
-  if( $response_options[HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES] == HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE )
-  {
-    // Make the response uncache-able.
-    header("Expires: Mon, 26 Jul 1997 05:00:00 GMT"); // Date in the past
-    header("Last-Modified: " . gmdate("D, d M Y H:i:s") . " GMT"); // Always modified
-    header("Cache-Control: no-cache, must-revalidate"); // HTTP/1.1
-    header("Pragma: no-cache"); // HTTP/1.0
-  }
-
-  switch( $response_options[HDP_MON_RESPONSE_OPTION_KEY__TYPE] )
-  {
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON:
-      {
-        header('Content-type: application/json');
-      }
-      break;
-
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT:
-      {
-        header('Content-type: application/javascript');
-      }
-      break;
-  }
-}
-
-/** Given $response_data (which we expect to be a JSON string), generate an
- *  HTTP response, which includes emitting the necessary HTTP response headers
- *  followed by the response body (that is either plain ol' $response_data,
- *  or a JSONP wrapper around it).
- */
-function hdp_mon_generate_response( $response_data )
-{
-  $jsonpFunctionName = NULL;
-  if (isset($_GET[HDP_MON_QUERY_ARG__JSONP])) {
-    $jsonpFunctionName = $_GET[HDP_MON_QUERY_ARG__JSONP];
-  }
-
-  hdp_mon_generate_response_headers( array
-  ( HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES => HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE,
-  HDP_MON_RESPONSE_OPTION_KEY__TYPE =>
-  isset( $jsonpFunctionName )  && $jsonpFunctionName != "" ?
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT :
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON ) );
-
-  if( isset( $jsonpFunctionName ) )
-  {
-    echo "$jsonpFunctionName( $response_data );";
-  }
-  else
-  {
-    echo $response_data;
-  }
-}
-
-  /* alert_type { ok, non-ok, warning, critical, all } */
-  define ("all", "-2");
-  define ("nok", "-1");
-  define ("ok", "0");
-  define ("warn", "1");
-  define ("critical", "2");
-
-  define ("HDFS_SERVICE_CHECK", "NAMENODE::NameNode process down");
-  define ("MAPREDUCE_SERVICE_CHECK", "JOBTRACKER::JobTracker process down");
-  define ("HBASE_SERVICE_CHECK", "HBASEMASTER::HBaseMaster process down");
-  define ("ZOOKEEPER_SERVICE_CHECK", "ZOOKEEPER::Percent ZooKeeper Servers down");
-  define ("HIVE_SERVICE_CHECK", "HIVE-METASTORE::Hive Metastore status check");
-  define ("OOZIE_SERVICE_CHECK", "OOZIE::Oozie Server status check");
-  define ("WEBHCAT_SERVICE_CHECK", "WEBHCAT::WebHCat Server status check");
-  define ("PUPPET_SERVICE_CHECK", "PUPPET::Puppet agent down");
-
-  // on SUSE, some versions of Nagios stored data in /var/lib
-  $status_file = "/var/nagios/status.dat";
-  if (!file_exists($status_file) && file_exists("/etc/SuSE-release")) {
-    $status_file = "/var/lib/nagios/status.dat";
-  }
-  
-  $q1="";
-  if (array_key_exists('q1', $_GET)) {
-    $q1=$_GET["q1"];
-  }
-  $q2="";
-  if (array_key_exists('q2', $_GET)) {
-    $q2=$_GET["q2"];
-  }
-  $alert_type="";
-  if (array_key_exists('alert_type', $_GET)) {
-    $alert_type=$_GET["alert_type"];
-  }
-  $host="";
-  if (array_key_exists('host_name', $_GET)) {
-    $host=$_GET["host_name"];
-  }
-  $indent="";
-  if (array_key_exists('indent', $_GET)) {
-    $indent=$_GET["indent"];
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  if ($q1 == "alerts") {
-    /* Add the service status object to result array */
-    $result['alerts'] = query_alerts ($status_file_content, $alert_type, $host);
-  }
-
-  if ($q2 == "hosts") {
-    /* Add the service status object to result array */
-    $result['hosts'] = query_hosts ($status_file_content, $alert_type, $host);
-  }
-
-  /* Add host count object to the results */
-  $result['hostcounts'] = query_host_count ($status_file_content);
-
-  /* Add services runtime states */
-  $result['servicestates'] = query_service_states ($status_file_content);
-
-  /* Return results */
-  if ($indent == "true") {
-    hdp_mon_generate_response(indent(json_encode($result)));
-  } else {
-    hdp_mon_generate_response(json_encode($result));
-  }
-
-  # Functions
-  /* Query service states */
-  function query_service_states ($status_file_content) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $services_object = array ();
-    $services_object["PUPPET"] = 0;
-    foreach ($matches[0] as $object) {
-
-      if (getParameter($object, "service_description") == HDFS_SERVICE_CHECK) {
-        $services_object["HDFS"] = getParameter($object, "last_hard_state");
-        if ($services_object["HDFS"] >= 1) {
-          $services_object["HDFS"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == MAPREDUCE_SERVICE_CHECK) {
-        $services_object["MAPREDUCE"] = getParameter($object, "last_hard_state");
-        if ($services_object["MAPREDUCE"] >= 1) {
-          $services_object["MAPREDUCE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HBASE_SERVICE_CHECK) {
-        $services_object["HBASE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HBASE"] >= 1) {
-          $services_object["HBASE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HIVE_SERVICE_CHECK) {
-        $services_object["HIVE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HIVE"] >= 1) {
-          $services_object["HIVE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == OOZIE_SERVICE_CHECK) {
-        $services_object["OOZIE"] = getParameter($object, "last_hard_state");
-        if ($services_object["OOZIE"] >= 1) {
-          $services_object["OOZIE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == WEBHCAT_SERVICE_CHECK) {
-        $services_object["WEBHCAT"] = getParameter($object, "last_hard_state");
-        if ($services_object["WEBHCAT"] >= 1) {
-          $services_object["WEBHCAT"] = 1;
-        }
-        continue;
-      }
-      /* In the case of ZooKeeper, the service is treated as running if the alert is
-       * ok or warning (i.e. at least some ZooKeeper instances are running).
-       */
-      if (getParameter($object, "service_description") == ZOOKEEPER_SERVICE_CHECK) {
-        $services_object["ZOOKEEPER"] = getParameter($object, "last_hard_state");
-        if ($services_object["ZOOKEEPER"] <= 1) {
-          $services_object["ZOOKEEPER"] = 0;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == PUPPET_SERVICE_CHECK) {
-        $state = getParameter($object, "last_hard_state");
-        if ($state >= 1) {
-          $services_object["PUPPET"]++;
-        }
-        continue;
-      }
-    }
-    if ($services_object["PUPPET"] >= 1) {
-      $services_object["PUPPET"] = 1;
-    }
-    $services_object = array_map('strval', $services_object);
-    return $services_object;
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $up_hosts = 0;
-    $down_hosts = 0;
-
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "last_hard_state") != ok) {
-        $down_hosts++;
-      } else {
-        $up_hosts++;
-      }
-    }
-    $hostcounts_object['up_hosts'] = $up_hosts;
-    $hostcounts_object['down_hosts'] = $down_hosts;
-    $hostcounts_object = array_map('strval', $hostcounts_object);
-    return $hostcounts_object;
-  }
-
-  /* Query Hosts */
-  function query_hosts ($status_file_content, $alert_type, $host) {
-    $hoststatus_attributes = array ("host_name", "current_state", "last_hard_state",
-                              "plugin_output", "last_check", "current_attempt",
-                              "last_hard_state_change", "last_time_up", "last_time_down",
-                              "last_time_unreachable", "is_flapping", "last_check");
-
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hosts_objects = array ();
-    $i = 0;
-    foreach ($matches[0] as $object) {
-      $hoststatus = array ();
-      $chost = getParameter($object, "host_name");
-      if (empty($host) || $chost == $host) {
-        foreach ($hoststatus_attributes as $attrib) {
-          $hoststatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-        }
-        $hoststatus['alerts'] = query_alerts ($status_file_content, $alert_type, $chost);
-        if (!empty($host)) {
-          $hosts_objects[$i] = $hoststatus;
-          $i++;
-          break;
-        }
-      }
-      if (!empty($hoststatus)) {
-        $hosts_objects[$i] = $hoststatus;
-        $i++;
-      }
-    }
-    /* echo "COUNT : " . count ($services_objects) . "\n"; */
-    return $hosts_objects;
-  }
-
-  /* Query Alerts */
-  function query_alerts ($status_file_content, $alert_type, $host) {
-
-    $servicestatus_attributes = array ("service_description", "host_name", "current_attempt",
-                                       "current_state", "plugin_output", "last_hard_state_change", "last_hard_state",
-                                       "last_time_ok", "last_time_warning", "last_time_unknown",
-                                       "last_time_critical", "is_flapping", "last_check",
-                                       "long_plugin_output");
-
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    #echo $matches[0][0] . ", " . $matches[0][1] . "\n";
-    #echo $matches[1][0] . ", " . $matches[1][1] . "\n";
-    $services_objects = array ();
-    $i = 0;
-    foreach ($matches[1] as $object) {      
-      $servicestatus = getParameterMap($object, $servicestatus_attributes);
-      switch ($alert_type) {
-      case "all":
-        if (empty($host) || $servicestatus['host_name'] == $host) {
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "nok":
-        if (getParameterMapValue($map, "last_hard_state") != ok &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "ok":
-        if (getParameterMapValue($map, "last_hard_state") == ok &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "warn":
-        if (getParameterMapValue($map, "last_hard_state") == warn &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "critical":
-        if (getParameterMapValue($map, "last_hard_state") == critical &&
-           (empty($host) || getParameterMapValue($map, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameterMapValue($map, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      }
-      
-      if (!empty($servicestatus)) {
-        $services_objects[$i] = $servicestatus;
-        $i++;
-      }
-    }
-
-    // echo "COUNT : " . count ($services_objects) . "\n";
-    return $services_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-	  case "DATANODE":
-      case "NAMENODE":
-      case "JOURNALNODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-	  case "TASKTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-      case "REGIONSERVER":
-        $pieces[0] = "HBASE";
-        break;
-      case "HIVE-METASTORE":
-      case "HIVE-SERVER":
-      case "WEBHCAT":
-        $pieces[0] = "HIVE";
-        break;
-      case "ZKSERVERS":
-	    $pieces[0] = "ZOOKEEPER";
-        break;
-      case "AMBARI":
-	    $pieces[0] = "AMBARI";
-      break;
-      case "FLUME":
-            $pieces[0] = "FLUME";
-      break;      
-      case "JOBHISTORY":
-        $pieces[0] = "MAPREDUCE2";
-        break;
-      case "RESOURCEMANAGER":
-      case "APP_TIMELINE_SERVER":
-      case "NODEMANAGER":
-        $pieces[0] = "YARN";
-        break;
-      case "STORM_UI_SERVER":
-      case "NIMBUS":
-      case "DRPC_SERVER":
-      case "SUPERVISOR":
-      case "STORM_REST_API":
-        $pieces[0] = "STORM";
-        break;
-      case "NAGIOS":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "ZOOKEEPER":
-      case "OOZIE":
-      case "GANGLIA":
-      case "STORM":
-      case "FALCON":
-      case "PUPPET":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-  function getParameterMapValue($map, $key) {
-    $value = $map[$key];
-
-    if (!is_null($value))
-      return "" . $value;
-
-    return "";
-  }
-
-
-  function getParameterMap($object, $keynames) {
-
-    $cnt = preg_match_all('/\t([\S]*)=[\n]?[\t]?([\S= ]*)/', $object, $matches, PREG_PATTERN_ORDER);
-
-    $tmpmap = array_combine($matches[1], $matches[2]);
-
-    $map = array();
-    foreach ($keynames as $key) {
-      $map[$key] = htmlentities($tmpmap[$key], ENT_COMPAT);
-    }
-
-    return $map;
-  }
-  
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-?>
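
The hand-rolled indent() above predates convenient pretty-printing; in the Python scripts shipped alongside it, the same effect is a single call. A minimal sketch (the result dict is illustrative):

    import json

    result = {"hostcounts": {"up_hosts": "3", "down_hosts": "0"}}
    # Equivalent of indent(json_encode($result)) in the PHP above.
    print(json.dumps(result, indent=2))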

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/sys_logger.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/sys_logger.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/sys_logger.py
deleted file mode 100644
index 6683342..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/sys_logger.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import sys
-import syslog
-
-# dictionary of state->severity mappings
-severities = {'UP':'OK', 'DOWN':'Critical', 'UNREACHABLE':'Critical', 'OK':'OK',
-              'WARNING':'Warning', 'UNKNOWN':'Warning', 'CRITICAL':'Critical'}
-
-# List of services which can result in events at the Degraded severity
-degraded_alert_services = ['HBASEMASTER::HBaseMaster CPU utilization',
-                           'HDFS::Namenode RPC Latency',
-                           'MAPREDUCE::JobTracker RPC Latency',
-                           'JOBTRACKER::Jobtracker CPU utilization']
-
-# List of services which can result in events at the Fatal severity
-fatal_alert_services = ['NAMENODE::Namenode Process down',
-                        'NAMENODE::NameNode process']
-
-# dictionary of service->msg_id mappings
-msg_ids = {'Host::Ping':'host_down',
-           'HBASEMASTER::HBaseMaster CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS Capacity utilization':'hdfs_percent_capacity',
-           'HDFS::Corrupt/Missing blocks':'hdfs_block',
-           'NAMENODE::Namenode Edit logs directory status':'namenode_edit_log_write',
-           'HDFS::Percent DataNodes down':'datanode_down',
-           'DATANODE::Process down':'datanode_process_down',
-           'HDFS::Percent DataNodes storage full':'datanodes_percent_storage_full',
-           'NAMENODE::Namenode Process down':'namenode_process_down',
-           'HDFS::Namenode RPC Latency':'namenode_rpc_latency',
-           'DATANODE::Storage full':'datanodes_storage_full',
-           'JOBTRACKER::Jobtracker Process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC Latency':'jobtracker_rpc_latency',
-           'MAPREDUCE::Percent TaskTrackers down':'tasktrackers_down',
-           'TASKTRACKER::Process down':'tasktracker_process_down',
-           'HBASEMASTER::HBaseMaster Process down':'hbasemaster_process_down',
-           'REGIONSERVER::Process down':'regionserver_process_down',
-           'HBASE::Percent region servers down':'regionservers_down',
-           'HIVE-METASTORE::HIVE-METASTORE status check':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent zookeeper servers down':'zookeepers_down',
-           'ZKSERVERS::ZKSERVERS Process down':'zookeeper_process_down',
-           'OOZIE::Oozie status check':'oozie_down',
-           'TEMPLETON::Templeton status check':'templeton_down',
-           'PUPPET::Puppet agent down':'puppet_down',
-           'NAGIOS::Nagios status log staleness':'nagios_status_log_stale',
-           'GANGLIA::Ganglia [gmetad] Process down':'ganglia_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for namenode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia collector [gmond] Process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary Namenode Process down':'secondary_namenode_process_down',
-           'JOBTRACKER::Jobtracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Web UI down':'hbase_ui_down',
-           'NAMENODE::Namenode Web UI down':'namenode_ui_down',
-           'JOBTRACKER::JobHistory Web UI down':'jobhistory_ui_down',
-           'JOBTRACKER::JobTracker Web UI down':'jobtracker_ui_down',
-
-           'HBASEMASTER::HBase Master CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS capacity utilization':'hdfs_percent_capacity',
-           'NAMENODE::NameNode edit logs directory status':'namenode_edit_log_write',
-           'DATANODE::DataNode process down':'datanode_process_down',
-           'NAMENODE::NameNode process down':'namenode_process_down',
-           'HDFS::NameNode RPC latency':'namenode_rpc_latency',
-           'DATANODE::DataNode storage full':'datanodes_storage_full',
-           'JOBTRACKER::JobTracker process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC latency':'jobtracker_rpc_latency',
-           'TASKTRACKER::TaskTracker process down':'tasktracker_process_down',
-           'HBASEMASTER::HBase Master process down':'hbasemaster_process_down',
-           'REGIONSERVER::RegionServer process down':'regionserver_process_down',
-           'HBASE::Percent RegionServers down':'regionservers_down',
-           'HIVE-METASTORE::Hive Metastore status check':'hive_metastore_process_down',
-           'HIVE-METASTORE::Hive Metastore process':'hive_metastore_process_down',
-           'ZOOKEEPER::Percent ZooKeeper Servers down':'zookeepers_down',
-           'ZOOKEEPER::ZooKeeper Server process down':'zookeeper_process_down',
-           'OOZIE::Oozie Server status check':'oozie_down',
-           'WEBHCAT::WebHCat Server status check':'templeton_down',
-           'GANGLIA::Ganglia [gmetad] process down':'ganglia_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for NameNode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary NameNode process down':'secondary_namenode_process_down',
-           'JOBTRACKER::JobTracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Master Web UI down':'hbase_ui_down',
-           'NAMENODE::NameNode Web UI down':'namenode_ui_down',
-           'Oozie status check':'oozie_down',
-           'WEBHCAT::WebHcat status check':'templeton_down',
-
-           # Ambari Nagios service check descriptions
-           'DATANODE::DataNode process':'datanode_process',
-           'NAMENODE::NameNode process':'namenode_process',
-           'NAMENODE::Secondary NameNode process':'secondary_namenode_process',
-           'JOURNALNODE::JournalNode process':'journalnode_process',
-           'ZOOKEEPER::ZooKeeper Server process':'zookeeper_process_down',
-           'JOBTRACKER::JobTracker process':'jobtracker_process',
-           'TASKTRACKER::TaskTracker process':'tasktracker_process',
-           'GANGLIA::Ganglia Server process':'ganglia_server_process',
-           'GANGLIA::Ganglia Monitor process for Slaves':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for NameNode':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for JobTracker':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HBase Master':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for ResourceManager':'ganglia_monitor_process',
-           'GANGLIA::Ganglia Monitor process for HistoryServer':'ganglia_monitor_process',
-           'HBASEMASTER::HBase Master process':'hbase_master_process',
-           'HBASE::Percent RegionServers live':'regionservers_down',
-           'REGIONSERVER::RegionServer process':'regionserver_process',
-           'NAGIOS::Nagios status log freshness':'nagios_process',
-           'FLUME::Flume Agent process':'flume_agent_process',
-           'OOZIE::Oozie Server status':'oozie_down',
-           'HIVE-METASTORE::Hive Metastore status':'hive_metastore_process',
-           'WEBHCAT::WebHCat Server status':'webhcat_down',
-           'RESOURCEMANAGER::ResourceManager process':'resourcemanager_process_down',
-           'RESOURCEMANAGER::ResourceManager RPC latency':'resourcemanager_rpc_latency',
-           'RESOURCEMANAGER::ResourceManager CPU utilization':'resourcemanager_cpu_utilization',
-           'RESOURCEMANAGER::ResourceManager Web UI':'resourcemanager_ui',
-           'NODEMANAGER::NodeManager process':'nodemanager_process_down',
-           'NODEMANAGER::NodeManager health':'nodemanager_health',
-           'NODEMANAGER::Percent NodeManagers live':'nodemanagers_down',
-           'APP_TIMELINE_SERVER::App Timeline Server process':'timelineserver_process',
-           'JOBHISTORY::HistoryServer RPC latency':'historyserver_rpc_latency',
-           'JOBHISTORY::HistoryServer CPU utilization':'historyserver_cpu_utilization',
-           'JOBHISTORY::HistoryServer Web UI':'historyserver_ui',
-           'JOBHISTORY::HistoryServer process':'historyserver_process'}
-
-# Determine the severity of the TVI alert based on the Nagios alert state.
-def determine_severity(state, service):
-    severity = severities.get(state, 'Warning')
-
-    # For some alerts, warning should be converted to Degraded
-    if severity == 'Warning' and service in degraded_alert_services:
-        severity = 'Degraded'
-    elif severity != 'OK' and service in fatal_alert_services:
-        severity = 'Fatal'
-
-    return severity
-
-
-# Determine the msg id for the TVI alert based on the service which generated the Nagios alert.
-# The msg id is used to correlate a log msg to a TVI rule.
-def determine_msg_id(service, severity):
-    for k, v in msg_ids.iteritems():
-        if k in service:
-            msg_id = v
-            if severity == 'OK':
-                msg_id = '{0}_ok'.format(msg_id)
-            return msg_id
-    return 'HADOOP_UNKNOWN_MSG'
-
-
-# Determine the domain.  Currently the domain is always 'Hadoop'.
-def determine_domain():
-    return 'Hadoop'
-
-
-# log the TVI msg to the syslog
-def log_tvi_msg(msg):
-    syslog.openlog('nagios', syslog.LOG_PID)
-    syslog.syslog(msg)
-
-
-# generate a tvi log msg from a Hadoop alert
-def generate_tvi_log_msg(alert_type, attempt, state, service, msg):
-    # Determine the TVI msg contents
-    severity = determine_severity(state, service)  # The TVI alert severity.
-    domain   = determine_domain()                  # The domain specified in the TVI alert.
-    msg_id   = determine_msg_id(service, severity) # The msg_id used to correlate to a TVI rule.
-
-    # Only log HARD alerts
-    if alert_type == 'HARD':
-        # Format and log msg
-        log_tvi_msg('{0}: {1}: {2}# {3}'.format(severity, domain, msg_id, msg))
-
-
-# main method which is called when invoked on the command line
-def main():
-    generate_tvi_log_msg(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
-
-
-# run the main method
-if __name__ == '__main__':
-    main()
-    sys.exit(0)
\ No newline at end of file

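For reference, main() above takes five positional arguments in the order alert_type, attempt, state, service, msg, matching what a Nagios notification command would pass (attempt is accepted but unused). A hypothetical invocation, with message text invented:

    python sys_logger.py HARD 1 CRITICAL "NAMENODE::NameNode process" "Connection refused"

    State CRITICAL maps to severity Critical and, because the service is listed
    in fatal_alert_services, it is escalated to Fatal; the msg_id lookup matches
    'NAMENODE::NameNode process', so the logged line is:

        Fatal: Hadoop: namenode_process# Connection refused
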
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 7252f8f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-# Returns True if the JDK major version is greater than 6
-def is_jdk_greater_6(java64_home):
-  import os
-  import re
-  java_bin = os.path.join(java64_home, 'bin', 'java')
-  ver_check = shell.call([java_bin, '-version'])
-
-  ver = ''
-  if 0 != ver_check[0]:
-    # java is not local, try the home name as a fallback
-    ver = java64_home
-  else:
-    ver = ver_check[1]
-
-  regex = re.compile('"1\.([0-9]*)\.0_([0-9]*)"', re.IGNORECASE)
-  r = regex.search(ver)
-  if r:
-    strs = r.groups()
-    if 2 == len(strs):
-      minor = int(strs[0])
-      if minor > 6:
-        return True
-
-  return False

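As a quick check of the parsing above, the regex pulls the minor version out of the standard `java -version` banner. A minimal, self-contained sketch (banner text assumed):

    import re

    banner = 'java version "1.7.0_45"'  # assumed sample banner
    regex = re.compile('"1\.([0-9]*)\.0_([0-9]*)"', re.IGNORECASE)
    match = regex.search(banner)
    print(match.groups())  # ('7', '45'): minor 7 > 6, so is_jdk_greater_6 returns True
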
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index a63ea38..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir, params.ambarinagios_php_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permissions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  File( format("{ambarinagios_php_dir}/{ambarinagios_php_filename}"),
-    content = StaticFile(params.ambarinagios_php_filename),
-  )
-
-  File( params.hdp_mon_nagios_addons_path,
-    content = StaticFile("hdp_mon_nagios_addons.conf"),
-  )
-
-  File(format("{nagios_var_dir}/ignore.dat"),
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0664)
-  
-  if System.get_instance().os_family == "ubuntu":
-    Link(params.ubuntu_stylesheets_desired_location,
-         to = params.ubuntu_stylesheets_real_location
-    )
-  
-  
-def set_web_permissions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  {conf_dir}/htpasswd.users {nagios_web_login} {nagios_web_password!p}")
-  Execute(cmd)
-
-  File( format("{conf_dir}/htpasswd.users"),
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().os_family == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  elif System.get_instance().os_family == "ubuntu":
-    command = format("usermod -G {nagios_group} www-data") # check -a ???
-  elif System.get_instance().os_family == "redhat":
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute( command)

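For context, the format() call in set_web_permissions() expands to a plain htpasswd invocation; the !p conversion appears to mark the password so it is masked in Ambari's command logs. A hypothetical expansion, with invented credentials (htpasswd_cmd is htpasswd2 on SUSE):

    htpasswd -c -b /etc/nagios/htpasswd.users nagiosadmin '<password>'
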
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index da35b34..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-from nagios_service import update_active_alerts
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    update_ignorable(params)
-
-    self.configure(env) # re-run configure so configs are regenerated after security is enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-
-    # check for alert structures
-    update_active_alerts()
-
-    
-def remove_conflicting_packages():  
-  Package('hdp_mon_nagios_addons', action = "remove")
-
-  Package('nagios-plugins', action = "remove")
-  
-  if System.get_instance().os_family in ["redhat","suse"]:
-    Execute("rpm -e --allmatches --nopostun nagios",
-      path  = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-      ignore_failures = True)
-
-def update_ignorable(params):
-  if not params.config.has_key('passiveInfo'):
-    return
-  else:
-    buf = ""
-    count = 0
-    for define in params.config['passiveInfo']:
-      try:
-        host = str(define['host'])
-        service = str(define['service'])
-        component = str(define['component'])
-        buf += host + " " + service + " " + component + "\n"
-        count += 1
-      except KeyError:
-        pass
-
-    f = None
-    try:
-      f = open('/var/nagios/ignore.dat', 'w')
-      f.write(buf)
-      if 1 == count:
-        Logger.info("Persisted '/var/nagios/ignore.dat' with 1 entry")
-      elif count > 1:
-        Logger.info("Persisted '/var/nagios/ignore.dat' with " + str(count) + " entries")
-    except:
-      Logger.info("Could not persist '/var/nagios/ignore.dat'")
-    finally:
-      if f is not None:
-        f.close()
-
-
-if __name__ == "__main__":
-  NagiosServer().execute()

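For reference, update_ignorable() above writes one whitespace-separated "host service component" line per passive alert definition. A sketch of the resulting /var/nagios/ignore.dat, with hypothetical hosts:

    c6401.ambari.apache.org NAMENODE::NameNode process NAMENODE
    c6402.ambari.apache.org DATANODE::DataNode process DATANODE
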
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index 883442c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().os_family != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d',
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_cpu.php')
-  nagios_server_check( 'check_cpu_ha.php')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_rpcq_latency_ha.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_webui_ha.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-  nagios_server_check( 'check_checkpoint_time.py' )
-  nagios_server_check( 'sys_logger.py' )
-  nagios_server_check( 'check_ambari_alerts.py' )
-  nagios_server_check( 'mm_wrapper.py' )
-  nagios_server_check( 'check_hive_thrift_port.py' )
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index b7f512b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import json
-import os
-import signal
-
-from resource_management import *
-from os.path import isfile
-
-
-def nagios_service(action='start'): # start or stop
-  import params
-  
-  nagios_pid_file = format("{nagios_pid_file}")
-
-  if action == 'start':
-    command = format("service {nagios_service_name} start")
-    Execute(command)   
-  elif action == 'stop':
-    # attempt to grab the pid in case we need it later
-    nagios_pid = 0    
-    if isfile(nagios_pid_file):   
-      with open(nagios_pid_file, "r") as file:
-        try:
-          nagios_pid = int(file.read())
-          Logger.info("Nagios is running with a PID of {0}".format(nagios_pid))
-        except:
-          Logger.info("Unable to read PID file {0}".format(nagios_pid_file))
-        finally:
-          file.close()
-
-    command = format("service {nagios_service_name} stop")  
-    Execute(command)
-
-    # on SUSE, there is a bug where Nagios doesn't kill the process 
-    # but this could also affect any OS, so don't restrict this to SUSE
-    if nagios_pid > 0:
-      try:
-        os.kill(nagios_pid, 0)
-      except:
-        Logger.info("The Nagios process has successfully terminated")
-      else:
-        Logger.info("The Nagios process with ID {0} failed to terminate; explicitly killing.".format(nagios_pid))
-        os.kill(nagios_pid, signal.SIGKILL)
-
-    # in the event that the Nagios scripts don't remove the pid file
-    if isfile( nagios_pid_file ):   
-      Execute(format("rm -f {nagios_pid_file}"))
-        
-  MonitorWebserver("restart")
-
-def update_active_alerts():
-  import status_params
-
-  alerts = None
-  if 'alerts' in status_params.config and status_params.config['alerts'] is not None:
-    alerts = status_params.config['alerts']
-
-  if alerts is None:
-    return
-
-  output = {}
-
-  for a in alerts:
-    alert_name = a['name']
-    alert_text = a['text']
-    alert_state = a['state']
-    alert_host = a['host']
-    if not output.has_key(alert_name):
-      output[alert_name] = {}
-
-    if not output[alert_name].has_key(alert_host):
-      output[alert_name][alert_host] = []
-
-    host_items = output[alert_name][alert_host]
-    alert_out = {}
-    alert_out['state'] = alert_state
-    alert_out['text'] = alert_text
-    host_items.append(alert_out)
-
-  with open(os.path.join(status_params.nagios_var_dir, 'ambari.json'), 'w') as f:
-    json.dump(output, f)
-

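For context, update_active_alerts() above groups alerts first by name and then by host before serializing. A sketch of the resulting ambari.json under nagios_var_dir, with hypothetical values:

    {
      "NameNode process": {
        "c6401.ambari.apache.org": [
          {"state": "CRITICAL", "text": "Connection refused"}
        ]
      }
    }
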
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index ababfa6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,366 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from functions import is_jdk_greater_6
-from resource_management import *
-import status_params
-
-HADOOP_HTTP_POLICY = "HTTP_ONLY"
-HADOOP_HTTPS_POLICY = "HTTPS_ONLY"
-
-# server configurations
-config = Script.get_config()
-
-if System.get_instance().os_family == "ubuntu":
-  nagios_service_name = "nagios3"
-else:
-  nagios_service_name = "nagios"
-
-conf_dir = format("/etc/{nagios_service_name}")
-nagios_obj_dir = format("{conf_dir}/objects")
-nagios_var_dir = status_params.nagios_var_dir
-nagios_rw_dir = status_params.nagios_rw_dir
-
-# HACK: the stylesheets for the Nagios UI on Ubuntu are installed in the wrong place, so we create a symlink.
-# In the future this can be fixed directly in the package.
-ubuntu_stylesheets_real_location = "/etc/nagios3/stylesheets"
-ubuntu_stylesheets_desired_location = "/usr/share/nagios3/htdocs/stylesheets"
-
-if System.get_instance().os_family == "ubuntu":
-  host_template = "generic-host"
-  plugins_dir = "/usr/lib/nagios/plugins"
-  nagios_web_dir = "/usr/share/nagios3/htdocs"
-  
-  cfg_files = [
-    format("{conf_dir}/commands.cfg"),
-    format("{conf_dir}/conf.d/contacts_nagios2.cfg"),
-    format("{conf_dir}/conf.d/generic-host_nagios2.cfg"),
-    format("{conf_dir}/conf.d/generic-service_nagios2.cfg"),
-    format("{conf_dir}/conf.d/timeperiods_nagios2.cfg"),
-  ]
-  cgi_dir = "/usr/lib/cgi-bin/nagios3"
-  cgi_weblink = "/cgi-bin/nagios3"
-else:
-  host_template = "linux-server"
-  plugins_dir = "/usr/lib64/nagios/plugins"
-  nagios_web_dir = "/usr/share/nagios"
-  
-  cfg_files = [
-    format("{nagios_obj_dir}/commands.cfg"),
-    format("{nagios_obj_dir}/contacts.cfg"),
-    format("{nagios_obj_dir}/timeperiods.cfg"),
-    format("{nagios_obj_dir}/templates.cfg"),
-  ]
-  
-  cgi_dir = "/usr/lib/nagios/cgi"
-  cgi_weblink = "/nagios/cgi-bin"
-  
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("/configurations/nagios-env/nagios_principal_name", "nagios")
-
-oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-if type(_rm_host) is list:
-  rm_hosts_in_str = ','.join(_rm_host)
-
-has_namenode = namenode_host is not None
-has_rm = _rm_host is not None
-
-# - test for HDFS or HCFS (glusterfs)
-if 'namenode_host' in config['clusterHostInfo']:
-  ishdfs_value = "HDFS"
-else:
-  ishdfs_value = None
-
-# HDFS, YARN, and MR use different settings to enable SSL
-hdfs_ssl_enabled = False
-yarn_ssl_enabled = False
-mapreduce_ssl_enabled = False
-
-# initialize all http policies to HTTP_ONLY
-dfs_http_policy = HADOOP_HTTP_POLICY
-yarn_http_policy = HADOOP_HTTP_POLICY
-mapreduce_http_policy = HADOOP_HTTP_POLICY
-
-#
-if has_namenode:
-  if 'dfs.http.policy' in config['configurations']['hdfs-site']:
-    dfs_http_policy = config['configurations']['hdfs-site']['dfs.http.policy']
-  if dfs_http_policy == HADOOP_HTTPS_POLICY:
-    hdfs_ssl_enabled = True
-if has_rm:
-  if 'yarn.http.policy' in config['configurations']['yarn-site']:
-    yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
-
-  if 'mapreduce.jobhistory.http.policy' in config['configurations']['mapred-site']:
-    mapreduce_http_policy = config['configurations']['mapred-site']['mapreduce.jobhistory.http.policy']
-
-if dfs_http_policy == HADOOP_HTTPS_POLICY:
-  hdfs_ssl_enabled = True
-
-if yarn_http_policy == HADOOP_HTTPS_POLICY:
-  yarn_ssl_enabled = True
-
-if mapreduce_http_policy == HADOOP_HTTPS_POLICY:
-  mapreduce_ssl_enabled = True
-
-# set default ports and webui lookup properties
-dfs_namenode_webui_default_port = '50070'
-dfs_snamenode_webui_default_port = '50090'
-yarn_nodemanager_default_port = '8042'
-dfs_namenode_webui_property = 'dfs.namenode.http-address'
-dfs_snamenode_webui_property = 'dfs.namenode.secondary.http-address'
-dfs_datanode_webui_property = 'dfs.datanode.http.address'
-yarn_rm_webui_property = 'yarn.resourcemanager.webapp.address'
-yarn_timeline_service_webui_property = 'yarn.timeline-service.webapp.address'
-yarn_nodemanager_webui_property = 'yarn.nodemanager.webapp.address'
-mapreduce_jobhistory_webui_property = 'mapreduce.jobhistory.webapp.address'
- 
-# if HDFS is protected by SSL, adjust the ports and lookup properties
-if hdfs_ssl_enabled:
-  dfs_namenode_webui_default_port = '50470'
-  dfs_snamenode_webui_default_port = '50091'
-  dfs_namenode_webui_property = 'dfs.namenode.https-address'
-  dfs_snamenode_webui_property = 'dfs.namenode.secondary.https-address'
-  dfs_datanode_webui_property = 'dfs.datanode.https.address'
-
-# if YARN is protected by SSL, adjust the ports and lookup properties  
-if yarn_ssl_enabled:
-  yarn_rm_webui_property = 'yarn.resourcemanager.webapp.https.address'
-  yarn_nodemanager_webui_property = 'yarn.nodemanager.webapp.https.address'  
-  yarn_timeline_service_webui_property = 'yarn.timeline-service.webapp.https.address'
-
-# if MR is protected by SSL, adjust the ports and lookup properties
-if mapreduce_ssl_enabled:
-  mapreduce_jobhistory_webui_property = 'mapreduce.jobhistory.webapp.https.address'
-  
-if has_namenode:
-  # extract NameNode
-  if dfs_namenode_webui_property in config['configurations']['hdfs-site']:
-    namenode_port = get_port_from_url(config['configurations']['hdfs-site'][dfs_namenode_webui_property])
-  else:
-    namenode_port = dfs_namenode_webui_default_port
-
-  # extract Secondary NameNode
-  if dfs_snamenode_webui_property in config['configurations']['hdfs-site']:
-    snamenode_port = get_port_from_url(config['configurations']['hdfs-site'][dfs_snamenode_webui_property])
-  else:
-    snamenode_port = dfs_snamenode_webui_default_port
-
-  if 'dfs.journalnode.http-address' in config['configurations']['hdfs-site']:
-    journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-    datanode_port = get_port_from_url(config['configurations']['hdfs-site'][dfs_datanode_webui_property])
-
-nm_port = yarn_nodemanager_default_port
-if has_rm:
-  if yarn_nodemanager_webui_property in config['configurations']['yarn-site']:
-    nm_port = get_port_from_url(config['configurations']['yarn-site'][yarn_nodemanager_webui_property])
-  
-flume_port = "4159"
-hbase_master_rpc_port = default('/configurations/hbase-site/hbase.master.port', "60000")
-rm_port = get_port_from_url(config['configurations']['yarn-site'][yarn_rm_webui_property])
-hs_port = get_port_from_url(config['configurations']['mapred-site'][mapreduce_jobhistory_webui_property])
-hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
-hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_master_port = config['configurations']['hbase-site']['hbase.master.info.port'] #"60010"
-hbase_rs_port = config['configurations']['hbase-site']['hbase.regionserver.info.port'] #"60030"
-storm_ui_port = config['configurations']['storm-site']['ui.port']
-drpc_port = config['configurations']['storm-site']['drpc.port']
-nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
-supervisor_port = "56431"
-storm_rest_api_port = "8745"
-falcon_port = config['configurations']['falcon-env']['falcon_port']
-ahs_port = get_port_from_url(config['configurations']['yarn-site'][yarn_timeline_service_webui_property])
-knox_gateway_port = config['configurations']['gateway-site']['gateway.port']
-kafka_broker_port = config['configurations']['kafka-broker']['port']
-
-# use sensible defaults for checkpoint as they are required by Nagios and 
-# may not be part of hdfs-site.xml on an upgrade
-if has_namenode:
-  if 'dfs.namenode.checkpoint.period' in config['configurations']['hdfs-site']:
-    dfs_namenode_checkpoint_period = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.period']
-  else:
-    dfs_namenode_checkpoint_period = '21600'
-  
-  if 'dfs.namenode.checkpoint.txns' in config['configurations']['hdfs-site']:
-    dfs_namenode_checkpoint_txns = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.txns']
-  else:
-    dfs_namenode_checkpoint_txns = '1000000'
-
-# this is different for HDP1
-nn_metrics_property = "FSNamesystem"
-clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK 
-
-
-java64_home = config['hostLevelParams']['java_home']
-check_cpu_on = is_jdk_greater_6(java64_home)
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-nagios_keytab_path = default("/configurations/nagios-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-nn_ha_host_port_map = {}
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    nn_ha_host_port_map[nn_host.split(":")[0]] = nn_host.split(":")[1]
-else:
-  if 'namenode_host' in config['clusterHostInfo']:
-    namenode_metadata_port = get_port_from_url(config['configurations']['core-site']['fs.defaultFS'])
-    nn_ha_host_port_map[config['clusterHostInfo']['namenode_host'][0]] = namenode_metadata_port
-  else:
-    namenode_metadata_port = '8020'
-    
-os_family = System.get_instance().os_family
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().os_family == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-  web_conf_dir = "/etc/apache2/conf.d"
-elif System.get_instance().os_family == "ubuntu":
-  nagios_p1_pl = "/usr/lib/nagios3/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  web_conf_dir = "/etc/apache2/conf.d"
-elif System.get_instance().os_family == "redhat":
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  web_conf_dir = "/etc/httpd/conf.d"
-
-nagios_httpd_config_file = format("{web_conf_dir}/{nagios_service_name}.conf")
-hdp_mon_nagios_addons_path = format("{web_conf_dir}/hdp_mon_nagios_addons.conf")
-
-ambarinagios_php_dir = "/usr/share/hdp/nagios/"
-ambarinagios_php_filename = "nagios_alerts.php"
-
-nagios_user = config['configurations']['nagios-env']['nagios_user']
-nagios_group = config['configurations']['nagios-env']['nagios_group']
-nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
-nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
-user_group = config['configurations']['cluster-env']['user_group']
-nagios_contact = config['configurations']['nagios-env']['nagios_contact']
-
-
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-_app_timeline_server_hosts = default("/clusterHostInfo/app_timeline_server_hosts",None)
-_nimbus_host = default("/clusterHostInfo/nimbus_hosts",None)
-_drpc_host = default("/clusterHostInfo/drpc_server_hosts",None)
-_supervisor_hosts = default("/clusterHostInfo/supervisor_hosts",None)
-_storm_ui_host = default("/clusterHostInfo/storm_ui_server_hosts",None)
-_storm_rest_api_hosts = default("/clusterHostInfo/storm_rest_api_hosts",None)
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-if type(hbase_master_hosts) is list:
-  hbase_master_hosts_in_str = ','.join(hbase_master_hosts)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-_falcon_host = default("/clusterHostInfo/falcon_server_hosts", None)
-# can differ on HDP1
-#_mapred_tt_hosts = _slave_hosts
-#if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-_knox_gateway_host =  default("/clusterHostInfo/knox_gateway_hosts", None)
-_kafka_broker_host =  default("/clusterHostInfo/kafka_broker_hosts", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-if 'namenode_host' in config['clusterHostInfo']:
-  nn_hosts_string = " ".join(namenode_host)
-else:
-  nn_hosts_string = " ".join(config['clusterHostInfo']['ambari_server_host'])
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'resourcemanager' : _rm_host,
-    'nodemanagers' : _nm_hosts,
-    'historyserver2' : _hs_host,
-    'journalnodes' : _journalnode_hosts,
-    'nimbus' : _nimbus_host,
-    'drpc-server' : _drpc_host,
-    'storm_ui' : _storm_ui_host,
-    'supervisors' : _supervisor_hosts,
-    'storm_rest_api' : _storm_rest_api_hosts,
-    'falcon-server' : _falcon_host,
-    'ats-servers' : _app_timeline_server_hosts,
-    'knox-gateway' : _knox_gateway_host,
-    'kafka-broker' : _kafka_broker_host
-}

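Many of the port lookups above go through get_port_from_url(), which extracts the port component from an address-style property value. Illustrative calls (property values invented):

    get_port_from_url('thrift://c6402.ambari.apache.org:9083')  # -> '9083'
    get_port_from_url('c6401.ambari.apache.org:50070')          # -> '50070'
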
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 11d4aa9..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")
-
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 610b2bd..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,109 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}

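For context, the two Jinja2 placeholders above are filled from nagios-env. With hypothetical values nagios_web_login=nagiosadmin and nagios_contact=admin@example.com, the first contact definition renders roughly as:

    define contact{
            contact_name    nagiosadmin
            use             generic-contact
            alias           Nagios Admin
            email           admin@example.com
            }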

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index f70eee8..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,177 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
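-  # Writing 0 to /selinux/enforce puts SELinux into permissive mode for the
-  # current boot; only_if guards hosts where SELinux is absent.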
-  Execute("/bin/echo 0 > /selinux/enforce",
-          only_if="test -f /selinux/enforce"
-  )
-
-  install_snappy()
-
-  #directories
-  if params.has_namenode:
-    Directory(params.hdfs_log_dir_prefix,
-              recursive=True,
-              owner='root',
-              group=params.user_group,
-              mode=0775
-    )
-    Directory(params.hadoop_pid_dir_prefix,
-              recursive=True,
-              owner='root',
-              group='root'
-    )
-    # this isn't needed with stack 1
-    Directory(params.hadoop_tmp_dir,
-              recursive=True,
-              owner=params.hdfs_user,
-              )
-    # files
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-         owner=tc_owner,
-         content=Template('commons-logging.properties.j2')
-    )
-
-    health_check_template = "health_check-v2" #for stack 1 use 'health_check'
-    File(os.path.join(params.hadoop_conf_dir, "health_check"),
-         owner=tc_owner,
-         content=Template(health_check_template + ".j2")
-    )
-
-    log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-    if params.log4j_props is not None:
-      File(log4j_filename,
-           mode=0644,
-           group=params.user_group,
-           owner=params.hdfs_user,
-           content=params.log4j_props
-      )
-    elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-      File(log4j_filename,
-           mode=0644,
-           group=params.user_group,
-           owner=params.hdfs_user,
-      )
-
-    File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-         owner=params.hdfs_user,
-         content=Template("hadoop-metrics2.properties.j2")
-    )
-
-def setup_database():
-  """
-  Load DB
-  """
-  import params
-  db_driver_dload_cmd = ""
-  environment = {
-    "no_proxy": format("{ambari_server_hostname}")
-  }
-  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf -x \"\" \
-      --retry 5 {oracle_driver_symlink_url} -o {hadoop_lib_home}/{db_driver_filename}")
-  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf -x \"\" \
-      --retry 5 {mysql_driver_symlink_url} -o {hadoop_lib_home}/{db_driver_filename}")
-
-  if db_driver_dload_cmd:
-    Execute(db_driver_dload_cmd,
-            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}"),
-            environment = environment
-    )
-
-
-def setup_configs():
-  """
-  Creates configs for the HDFS and MapReduce services
-  """
-  import params
-
-  if params.has_namenode:
-    File(params.task_log4j_properties_location,
-         content=StaticFile("task-log4j.properties"),
-         mode=0755
-    )
-
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-           owner=params.hdfs_user,
-           group=params.user_group
-      )
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-      File(os.path.join(params.hadoop_conf_dir, 'masters'),
-                owner=params.hdfs_user,
-                group=params.user_group
-      )
-
-  generate_include_file()
-
-
-def generate_include_file():
-  import params
-
-  if params.has_namenode and params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-
-def install_snappy():
-  import params
-
-  snappy_so = "libsnappy.so"
-  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
-  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
-  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
-  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
-  so_src_dir_x86 = format("{hadoop_home}/lib")
-  so_src_dir_x64 = format("{hadoop_home}/lib64")
-  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
-  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-  if params.has_namenode:
-    Execute(
-      format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-    Execute(
-      format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
-
-
-def create_javahome_symlink():
-  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Execute("mkdir -p /usr/jdk64/")
-    Execute("ln -s /usr/jdk/jdk1.6.0_31 /usr/jdk64/jdk1.6.0_31")
-

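A note on the idiom in setup_database() above: an Execute resource from
resource_management is skipped whenever its not_if shell guard succeeds,
which is what keeps the driver download idempotent across repeated hook
runs. A minimal sketch of the same pattern as it would run inside an
Ambari hook (the paths and URL below are placeholders, not real stack
parameters):

    from resource_management import *

    # Placeholder values standing in for the params.* lookups in the hook.
    hadoop_lib_home = "/usr/lib/hadoop/lib"
    db_driver_filename = "mysql-connector-java.jar"
    mysql_driver_symlink_url = "http://repo.example.com/mysql-connector-java.jar"

    # format() interpolates from the caller's scope; the curl command runs
    # only while the target jar is absent, so a re-run is a no-op.
    Execute(format("curl -kf --retry 5 {mysql_driver_symlink_url} "
                   "-o {hadoop_lib_home}/{db_driver_filename}"),
            not_if=format("test -e {hadoop_lib_home}/{db_driver_filename}"))
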
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 2197ba5..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index 1adba80..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index c4759f4..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,65 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}

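The "# syntax:" comment in the template above maps directly onto the sink
lines that follow: in namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
the prefix is "namenode", the sink instance is "ganglia", and the option is
"servers". A quick illustration of that split in Python (host name made up):

    # One rendered line from hadoop-metrics2.properties.
    line = "namenode.sink.ganglia.servers=ganglia.example.com:8661"
    key, value = line.split("=", 1)
    prefix, kind, instance, option = key.split(".")
    print(prefix, kind, instance, option, "->", value)
    # namenode sink ganglia servers -> ganglia.example.com:8661
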
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2
deleted file mode 100644
index 0a03d17..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-# Run all checks
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index ff17b19..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,109 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_phd_template_var("::phd::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_phd_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_phd_template_var("::phd::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-
-# Run all checks
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

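The include_hosts_list.j2 template above iterates slave_hosts, which the
Ambari agent resolves from the script's scope when the File resource
renders it. A standalone sketch of the same rendering with plain jinja2
(host names made up, template body condensed to skip the license header):

    from jinja2 import Template

    # Condensed body of include_hosts_list.j2: one included host per line.
    template = Template("{% for host in slave_hosts %}{{host}}\n{% endfor %}")

    print(template.render(slave_hosts=["dn1.example.com", "dn2.example.com"]))
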
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml
deleted file mode 100644
index ca45822..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-      <active>true</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml
deleted file mode 100644
index 4ca3b26..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  
-  <os family="redhat6">
-    <repo>
-      <baseurl>http://localhost/PHD</baseurl>
-      <repoid>PHD-3.0.0.0</repoid>
-      <reponame>PHD</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://localhost/PHD-UTILS</baseurl>
-      <repoid>PHD-UTILS-1.0</repoid>
-      <reponame>PHD-UTILS</reponame>
-    </repo>
-  </os>
-</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json
deleted file mode 100644
index b52c4d2..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json
+++ /dev/null
@@ -1,75 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
-    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
-    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
-    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
-    "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "POSTGRESQL_SERVER-START"],
-    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START", "POSTGRESQL_SERVER-START"],
-    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
-    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START",
-        "MYSQL_SERVER-START", "POSTGRESQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
-    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
-    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"]
-  },
-  "_comment" : "GLUSTERFS-specific dependencies",
-  "optional_glusterfs": {
-    "HBASE_MASTER-START": ["PEERSTATUS-START"],
-    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
-  },
-  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
-  "optional_no_glusterfs": {
-    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
-    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
-    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
-    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HIVE_SERVER-START": ["DATANODE-START"],
-    "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
-        "SECONDARY_NAMENODE-START"],
-    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
-        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
-    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"]
-  },
-  "_comment" : "Dependencies that are used in HA NameNode cluster",
-  "namenode_optional_ha": {
-    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
-  },
-  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
-  "resourcemanager_optional_ha" : {
-    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
-  }
-}
-

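The "_comment" keys at the top of role_command_order.json spell out the
record format: each key is a blocked ROLE-COMMAND pair, and its value
lists the ROLE-COMMAND pairs that must finish first. A toy reading of
that shape (not Ambari's actual scheduler):

    import json

    # Two records lifted from the general_deps section above.
    deps = json.loads("""{
      "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
      "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"]
    }""")

    def remaining_blockers(role_command, completed):
        # A command may run once every one of its blockers has completed.
        return [b for b in deps.get(role_command, []) if b not in completed]

    # ZooKeeper is up; the HBase master still blocks the region server.
    print(remaining_blockers("HBASE_REGIONSERVER-START",
                             completed={"ZOOKEEPER_SERVER-START"}))
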
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml
deleted file mode 100644
index 74a4c15..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>content</name>
-    <description>Describe all the Flume agent configurations</description>
-    <value>
-# Flume agent config
-    </value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml
deleted file mode 100644
index 902b3ca..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>flume_conf_dir</name>
-    <value>/etc/flume/conf</value>
-    <description>Location to save configuration files</description>
-  </property>
-  <property>
-    <name>flume_log_dir</name>
-    <value>/var/log/flume</value>
-    <description>Location to save log files</description>
-  </property>
-  <property>
-    <name>flume_user</name>
-    <value>flume</value>
-    <property-type>USER</property-type>
-    <description>Flume User</description>
-  </property>
-
-  <!-- flume-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for flume-env.sh file</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
-# during Flume startup.
-
-# Environment variables can be set here.
-
-export JAVA_HOME={{java_home}}
-
-# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
-# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
-
-# Note that the Flume conf directory is always included in the classpath.
-#FLUME_CLASSPATH=""
-
-# export HIVE_HOME=/usr/lib/hive
-# export HCAT_HOME=/usr/lib/hive-hcatalog
-    </value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml
deleted file mode 100644
index 8c6ac27..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-# Flume log4j config
-    </value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml
deleted file mode 100644
index 7421be1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>FLUME</name>
-      <displayName>Flume</displayName>
-      <comment>Data management and processing platform</comment>
-      <version>1.5.0.1.phd.3.0.0.0</version>
-      <components>
-        <component>
-          <name>FLUME_HANDLER</name>
-          <displayName>Flume</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/flume_handler.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>flume</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/flume_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>flume-env</config-type>
-        <config-type>flume-conf</config-type>
-        <config-type>flume-log4j</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json
deleted file mode 100644
index b0eae2e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json
+++ /dev/null
@@ -1,720 +0,0 @@
-{
-  "FLUME_HANDLER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.metrics.gcCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.metrics.gcTimeMillis",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.metrics.logError",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.metrics.logFatal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.metrics.logInfo",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.metrics.logWarn",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/maxMemoryM": {
-              "metric": "jvm.metrics.maxMemoryM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.metrics.memHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.metrics.memHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.metrics.memNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.metrics.memNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.metrics.threadsBlocked",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.metrics.threadsNew",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.metrics.threadsRunnable",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.metrics.threadsTerminated",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.metrics.threadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.metrics.threadsWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/ChannelCapacity": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelCapacity",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/StartTime": {
-              "metric": "(\\w+).CHANNEL.(\\w+).StartTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/StopTime": {
-              "metric": "(\\w+).CHANNEL.(\\w+).StopTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/ChannelSize": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/ConnectionCreatedCount": {
-              "metric": "(\\w+).SINK.(\\w+).ConnectionCreatedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/BatchCompleteCount": {
-              "metric": "(\\w+).SINK.(\\w+).BatchCompleteCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/EventDrainSuccessCount": {
-              "metric": "(\\w+).SINK.(\\w+).EventDrainSuccessCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/StartTime": {
-              "metric": "(\\w+).SINK.(\\w+).StartTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/EventDrainAttemptCount": {
-              "metric": "(\\w+).SINK.(\\w+).EventDrainAttemptCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/ConnectionFailedCount": {
-              "metric": "(\\w+).SINK.(\\w+).ConnectionFailedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/BatchUnderflowCount": {
-              "metric": "(\\w+).SINK.(\\w+).BatchUnderflowCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/ConnectionClosedCount": {
-              "metric": "(\\w+).SINK.(\\w+).ConnectionClosedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/StopTime": {
-              "metric": "(\\w+).SINK.(\\w+).StopTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/BatchEmptyCount": {
-              "metric": "(\\w+).SINK.(\\w+).BatchEmptyCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/StartTime": {
-              "metric": "(\\w+).SOURCE.(\\w+).StartTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/OpenConnectionCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).OpenConnectionCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendReceivedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendReceivedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/EventReceivedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).EventReceivedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/StopTime": {
-              "metric": "(\\w+).SOURCE.(\\w+).StopTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/EventAcceptedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).EventAcceptedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
-              "pointInTime": false,
-              "temporal": true
-            },
-
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
-              "pointInTime": false,
-              "temporal": true
-            },
-
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
-              "pointInTime": false,
-              "temporal": true
-            }
-
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.metrics.gcCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.metrics.gcTimeMillis",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.metrics.logError",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.metrics.logFatal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.metrics.logInfo",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.metrics.logWarn",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/maxMemoryM": {
-              "metric": "jvm.metrics.maxMemoryM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.metrics.memHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.metrics.memHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.metrics.memNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.metrics.memNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.metrics.threadsBlocked",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.metrics.threadsNew",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.metrics.threadsRunnable",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.metrics.threadsTerminated",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.metrics.threadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.metrics.threadsWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/ChannelCapacity": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelCapacity",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/StartTime": {
-              "metric": "(\\w+).CHANNEL.(\\w+).StartTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/StopTime": {
-              "metric": "(\\w+).CHANNEL.(\\w+).StopTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/ChannelSize": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/ConnectionCreatedCount": {
-              "metric": "(\\w+).SINK.(\\w+).ConnectionCreatedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/BatchCompleteCount": {
-              "metric": "(\\w+).SINK.(\\w+).BatchCompleteCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/EventDrainSuccessCount": {
-              "metric": "(\\w+).SINK.(\\w+).EventDrainSuccessCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/StartTime": {
-              "metric": "(\\w+).SINK.(\\w+).StartTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/EventDrainAttemptCount": {
-              "metric": "(\\w+).SINK.(\\w+).EventDrainAttemptCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/ConnectionFailedCount": {
-              "metric": "(\\w+).SINK.(\\w+).ConnectionFailedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/BatchUnderflowCount": {
-              "metric": "(\\w+).SINK.(\\w+).BatchUnderflowCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/ConnectionClosedCount": {
-              "metric": "(\\w+).SINK.(\\w+).ConnectionClosedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/StopTime": {
-              "metric": "(\\w+).SINK.(\\w+).StopTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SINK/$2/BatchEmptyCount": {
-              "metric": "(\\w+).SINK.(\\w+).BatchEmptyCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/StartTime": {
-              "metric": "(\\w+).SOURCE.(\\w+).StartTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/OpenConnectionCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).OpenConnectionCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/AppendReceivedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).AppendReceivedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/EventReceivedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).EventReceivedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/StopTime": {
-              "metric": "(\\w+).SOURCE.(\\w+).StopTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/flume/$1/SOURCE/$2/EventAcceptedCount": {
-              "metric": "(\\w+).SOURCE.(\\w+).EventAcceptedCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
-              "pointInTime": false,
-              "temporal": true
-            },
-
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
-              "metric": "(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
-              "pointInTime": false,
-              "temporal": true
-            },
-
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._max",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._min",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
-              "metric": "(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
-              "pointInTime": false,
-              "temporal": true
-            }
-
-          }
-        }
-      }
-    ]
-  }
-}
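
For readers decoding the property ids above: the $1/$2 tokens are back-substituted from the capture groups of the regex in each "metric" field, and the .substring(...)/.replaceAll(...) suffixes are post-processing hooks applied to those arguments. A minimal sketch of the plain substitution, ignoring the post-processing hooks and standing in for Ambari's actual property provider:

    import re

    def expand_property_id(property_id, metric_pattern, metric_name):
        # substitute $1, $2, ... with the groups that metric_pattern
        # captures from the concrete metric name; None if no match
        m = re.match(metric_pattern + '$', metric_name)
        if m is None:
            return None
        for i, group in enumerate(m.groups(), start=1):
            property_id = property_id.replace('$%d' % i, group)
        return property_id

    # 'a1.CHANNEL.c1.ChannelSize' maps back to
    # 'metrics/flume/a1/CHANNEL/c1/ChannelSize'
    print(expand_property_id('metrics/flume/$1/CHANNEL/$2/ChannelSize',
                             r'(\w+).CHANNEL.(\w+).ChannelSize',
                             'a1.CHANNEL.c1.ChannelSize'))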

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py
deleted file mode 100644
index 2db4039..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py
+++ /dev/null
@@ -1,255 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import glob
-import json
-import os
-from resource_management import *
-
-def flume(action = None):
-  import params
-
-  if action == 'config':
-    # remove previously defined meta files
-    for n in find_expected_agent_names():
-      os.unlink(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'))
-
-    Directory(params.flume_conf_dir, recursive=True)
-    Directory(params.flume_log_dir, owner=params.flume_user)
-
-    File(format("{flume_conf_dir}/flume-env.sh"),
-         owner=params.flume_user,
-         content=InlineTemplate(params.flume_env_sh_template)
-    )
-
-    flume_agents = {}
-    if params.flume_conf_content is not None:
-      flume_agents = build_flume_topology(params.flume_conf_content)
-
-    for agent in flume_agents.keys():
-      flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent)
-      flume_agent_conf_file = os.path.join(flume_agent_conf_dir, 'flume.conf')
-      flume_agent_meta_file = os.path.join(flume_agent_conf_dir, 'ambari-meta.json')
-      flume_agent_log4j_file = os.path.join(flume_agent_conf_dir, 'log4j.properties')
-
-      Directory(flume_agent_conf_dir)
-
-      PropertiesFile(flume_agent_conf_file,
-        properties=flume_agents[agent],
-        mode = 0644)
-
-      File(flume_agent_log4j_file,
-        content=Template('log4j.properties.j2', agent_name = agent),
-        mode = 0644)
-
-      File(flume_agent_meta_file,
-        content = json.dumps(ambari_meta(agent, flume_agents[agent])),
-        mode = 0644)
-
-  elif action == 'start':
-    # desired state for service should be STARTED
-    if len(params.flume_command_targets) == 0:
-      _set_desired_state('STARTED')
-      
-    flume_base = format('su -s /bin/bash {flume_user} -c "export JAVA_HOME={java_home}; '
-      '{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
-
-    for agent in cmd_target_names():
-      flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
-      flume_agent_conf_file = flume_agent_conf_dir + os.sep + "flume.conf"
-      flume_agent_pid_file = params.flume_run_dir + os.sep + agent + ".pid"
-
-      if not os.path.isfile(flume_agent_conf_file):
-        continue
-
-      if not is_live(flume_agent_pid_file):
-        # TODO someday make the ganglia ports configurable
-        extra_args = ''
-        if params.ganglia_server_host is not None:
-          extra_args = '-Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts={0}:{1}'
-          extra_args = extra_args.format(params.ganglia_server_host, '8655')
-
-        flume_cmd = flume_base.format(agent, flume_agent_conf_dir,
-           flume_agent_conf_file, extra_args)
-
-        Execute(flume_cmd, wait_for_finish=False)
-
-        # startup can briefly spawn more than one matching process, so only the first (oldest) pid counts
-        pid_cmd = format('pgrep -o -u {flume_user} -f ^{java_home}.*{agent}.* > {flume_agent_pid_file}')
-        Execute(pid_cmd, logoutput=True, tries=10, try_sleep=6)
-
-    pass
-  elif action == 'stop':
-    # desired state for service should be INSTALLED
-    if len(params.flume_command_targets) == 0:
-      _set_desired_state('INSTALLED')
-
-    pid_files = glob.glob(params.flume_run_dir + os.sep + "*.pid")
-
-    if 0 == len(pid_files):
-      return
-
-    agent_names = cmd_target_names()
-
-
-    for agent in agent_names:
-      pid_file = params.flume_run_dir + os.sep + agent + '.pid'
-      pid = format('`cat {pid_file}` > /dev/null 2>&1')
-      Execute(format('kill {pid}'), ignore_failures=True)
-      File(pid_file, action = 'delete')
-
-
-def ambari_meta(agent_name, agent_conf):
-  res = {}
-
-  sources = agent_conf[agent_name + '.sources'].split(' ')
-  res['sources_count'] = len(sources)
-
-  sinks = agent_conf[agent_name + '.sinks'].split(' ')
-  res['sinks_count'] = len(sinks)
-
-  channels = agent_conf[agent_name + '.channels'].split(' ')
-  res['channels_count'] = len(channels)
-
-  return res
-
-# define a map of dictionaries, where the key is agent name
-# and the dictionary is the name/value pair
-def build_flume_topology(content):
-
-  result = {}
-  agent_names = []
-
-  for line in content.split('\n'):
-    rline = line.strip()
-    if 0 != len(rline) and not rline.startswith('#'):
-      pair = rline.split('=')
-      lhs = pair[0].strip()
-      rhs = pair[1].strip()
-
-      part0 = lhs.split('.')[0]
-
-      if lhs.endswith(".sources"):
-        agent_names.append(part0)
-
-      if not result.has_key(part0):
-        result[part0] = {}
-
-      result[part0][lhs] = rhs
-
-  # trim out non-agents
-  for k in result.keys():
-    if not k in agent_names:
-      del result[k]
-
-
-  return result
-
-def is_live(pid_file):
-  live = False
-
-  try:
-    check_process_status(pid_file)
-    live = True
-  except ComponentIsNotRunning:
-    pass
-
-  return live
-
-def live_status(pid_file):
-  import params
-
-  pid_file_part = pid_file.split(os.sep).pop()
-
-  res = {}
-  res['name'] = pid_file_part
-  
-  if pid_file_part.endswith(".pid"):
-    res['name'] = pid_file_part[:-4]
-
-  res['status'] = 'RUNNING' if is_live(pid_file) else 'NOT_RUNNING'
-  res['sources_count'] = 0
-  res['sinks_count'] = 0
-  res['channels_count'] = 0
-
-  flume_agent_conf_dir = params.flume_conf_dir + os.sep + res['name']
-  flume_agent_meta_file = flume_agent_conf_dir + os.sep + 'ambari-meta.json'
-
-  try:
-    with open(flume_agent_meta_file) as fp:
-      meta = json.load(fp)
-      res['sources_count'] = meta['sources_count']
-      res['sinks_count'] = meta['sinks_count']
-      res['channels_count'] = meta['channels_count']
-  except:
-    pass
-
-  return res
-  
-def flume_status():
-  import params
-
-  meta_files = find_expected_agent_names()
-  pid_files = []
-  for agent_name in meta_files:
-    pid_files.append(os.path.join(params.flume_run_dir, agent_name + '.pid'))
-
-  procs = []
-  for pid_file in pid_files:
-    procs.append(live_status(pid_file))
-
-  return procs
-
-# these are what Ambari believes should be running
-def find_expected_agent_names():
-  import params
-
-  files = glob.glob(params.flume_conf_dir + os.sep + "*/ambari-meta.json")
-  expected = []
-
-  for f in files:
-    expected.append(os.path.dirname(f).split(os.sep).pop())
-
-  return expected
-
-def cmd_target_names():
-  import params
-
-  if len(params.flume_command_targets) > 0:
-    return params.flume_command_targets
-  else:
-    return find_expected_agent_names()
-
-def _set_desired_state(state):
-  import params
-  try:
-    with open(os.path.join(params.flume_run_dir, 'ambari-state.txt'), 'w') as fp:
-      fp.write(state)
-  except:
-    pass
-
-def get_desired_state():
-  import params
-
-  try:
-    with open(os.path.join(params.flume_run_dir, 'ambari-state.txt'), 'r') as fp:
-      return fp.read()
-  except:
-    return 'INSTALLED'
-  
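
The config path above hinges on build_flume_topology(), which keys every property line on its first dotted component and then drops any group that never declared a .sources list, so only real agents survive. A self-contained Python 3 rework of that parse, with the Python 2 has_key idiom replaced - a sketch, not the shipped code:

    def build_flume_topology(content):
        result = {}
        agent_names = set()
        for line in content.split('\n'):
            rline = line.strip()
            if rline and not rline.startswith('#'):
                lhs, _, rhs = rline.partition('=')
                lhs, rhs = lhs.strip(), rhs.strip()
                part0 = lhs.split('.')[0]
                if lhs.endswith('.sources'):
                    agent_names.add(part0)
                result.setdefault(part0, {})[lhs] = rhs
        # trim out non-agents
        return {k: v for k, v in result.items() if k in agent_names}

    conf = 'a1.sources = r1\na1.channels = c1\na1.sinks = k1\n'
    print(build_flume_topology(conf))
    # {'a1': {'a1.sources': 'r1', 'a1.channels': 'c1', 'a1.sinks': 'k1'}}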

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py
deleted file mode 100644
index b93b8e8..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class FlumeServiceCheck(Script):
-
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    if params.security_enabled:
-      principal_replaced = params.http_principal.replace("_HOST", params.hostname)
-      Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
-              user=params.smoke_user)
-
-    Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
-            logoutput=True,
-            tries = 3,
-            try_sleep = 20)
-
-if __name__ == "__main__":
-  FlumeServiceCheck().execute()
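
The smoke test above just shells out to flume-ng version and retries. A rough standalone equivalent using subprocess in place of resource_management's Execute; the default flume_bin and java_home values here are assumptions for illustration:

    import subprocess, time

    def flume_version_check(flume_bin='/usr/bin/flume-ng',
                            java_home='/usr/lib/jvm/java',
                            tries=3, try_sleep=20):
        for attempt in range(1, tries + 1):
            # mirrors: env JAVA_HOME={java_home} {flume_bin} version
            proc = subprocess.run(
                ['env', 'JAVA_HOME=%s' % java_home, flume_bin, 'version'],
                capture_output=True, text=True)
            print(proc.stdout)  # rough analogue of logoutput=True
            if proc.returncode == 0:
                return
            if attempt < tries:
                time.sleep(try_sleep)
        raise RuntimeError('%s version failed after %d tries'
                           % (flume_bin, tries))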

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py
deleted file mode 100644
index 42ac560..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py
+++ /dev/null
@@ -1,121 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from flume import flume
-from flume import flume_status
-from flume import find_expected_agent_names
-from flume import get_desired_state
-
-class FlumeHandler(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-
-    flume(action='start')
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    flume(action='stop')
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    flume(action='config')
-
-  def status(self, env):
-    import params
-
-    env.set_params(params)
-
-    processes = flume_status()
-    expected_agents = find_expected_agent_names()
-
-    json = {}
-    json['processes'] = processes
-    json['alerts'] = []
-
-    alert = {}
-    alert['name'] = 'flume_agent'
-    alert['label'] = 'Flume Agent process'
-
-    if len(processes) == 0 and len(expected_agents) == 0:
-      alert['state'] = 'OK'
-
-      if not params.hostname is None:
-        alert['text'] = 'No agents defined on ' + params.hostname
-      else:
-        alert['text'] = 'No agents defined'
-
-    else:
-      crit = []
-      ok = []
-
-      for proc in processes:
-        if not proc.has_key('status') or proc['status'] == 'NOT_RUNNING':
-          crit.append(proc['name'])
-        else:
-          ok.append(proc['name'])
-
-      text_arr = []
-
-      if len(crit) > 0:
-        text_arr.append("{0} {1} NOT running".format(", ".join(crit),
-          "is" if len(crit) == 1 else "are"))
-
-      if len(ok) > 0:
-        text_arr.append("{0} {1} running".format(", ".join(ok),
-          "is" if len(ok) == 1 else "are"))
-
-      plural = len(crit) > 1 or len(ok) > 1
-      alert['text'] = "Agent{0} {1} {2}".format(
-        "s" if plural else "",
-        " and ".join(text_arr),
-        "" if params.hostname is None else "on " + str(params.hostname))
-
-      alert['state'] = 'CRITICAL' if len(crit) > 0 else 'OK'
-
-    json['alerts'].append(alert)
-    self.put_structured_out(json)
-
-    # only throw an exception if there are agents defined and there is a
-    # problem with the processes; if there are no agents defined, then the
-    # service should report STARTED (green) only if the desired state is STARTED; otherwise, INSTALLED (red)
-    if len(expected_agents) > 0:
-      for proc in processes:
-        if not proc.has_key('status') or proc['status'] == 'NOT_RUNNING':
-          raise ComponentIsNotRunning()
-    elif len(expected_agents) == 0 and 'INSTALLED' == get_desired_state():
-      raise ComponentIsNotRunning()
-
-if __name__ == "__main__":
-  FlumeHandler().execute()
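
The interesting part of status() above is how per-agent process states collapse into one alert. An isolated sketch of that aggregation (CRITICAL as soon as any expected agent is down, OK otherwise), pulled out of the Script machinery:

    def summarize(processes):
        # a process with no status, or NOT_RUNNING, counts as down -
        # the same test as the has_key check in the handler above
        crit = [p['name'] for p in processes if p.get('status') != 'RUNNING']
        ok = [p['name'] for p in processes if p.get('status') == 'RUNNING']
        parts = []
        if crit:
            parts.append('%s %s NOT running'
                         % (', '.join(crit), 'is' if len(crit) == 1 else 'are'))
        if ok:
            parts.append('%s %s running'
                         % (', '.join(ok), 'is' if len(ok) == 1 else 'are'))
        return ('CRITICAL' if crit else 'OK', ' and '.join(parts))

    print(summarize([{'name': 'a1', 'status': 'RUNNING'},
                     {'name': 'a2', 'status': 'NOT_RUNNING'}]))
    # ('CRITICAL', 'a2 is NOT running and a1 is running')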

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py
deleted file mode 100644
index 227bf8a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-
-security_enabled = False
-
-#RPM versioning support
-rpm_version = default("/configurations/cluster-env/rpm_version", None)
-
-#hadoop params
-if rpm_version:
-  flume_bin = '/usr/phd/current/flume-client/bin/flume-ng'
-else:
-  flume_bin = '/usr/bin/flume-ng'
-
-flume_conf_dir = '/etc/flume/conf'
-java_home = config['hostLevelParams']['java_home']
-flume_log_dir = '/var/log/flume'
-flume_run_dir = '/var/run/flume'
-flume_user = 'flume'
-flume_group = 'flume'
-
-if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']:
-  flume_user = config['configurations']['flume-env']['flume_user']
-
-if (('flume-conf' in config['configurations']) and ('content' in config['configurations']['flume-conf'])):
-  flume_conf_content = config['configurations']['flume-conf']['content']
-else:
-  flume_conf_content = None
-
-if (('flume-log4j' in config['configurations']) and ('content' in config['configurations']['flume-log4j'])):
-  flume_log4j_content = config['configurations']['flume-log4j']['content']
-else:
-  flume_log4j_content = None
-
-targets = default('/commandParams/flume_handler', None)
-flume_command_targets = [] if targets is None else targets.split(',')
-
-flume_env_sh_template = config['configurations']['flume-env']['content']
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])
-ganglia_server_host = None
-if 0 != len(ganglia_server_hosts):
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hostname = None
-if config.has_key('hostname'):
-  hostname = config['hostname']
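
Several lookups above lean on resource_management's default() helper, which walks a /-separated path through the nested config and falls back when any segment is missing. A toy stand-in for illustration (the real helper reads Script.get_config() itself; this one takes the dict explicitly):

    def default(config, path, fallback=None):
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cfg = {'configurations': {'cluster-env': {'rpm_version': '3.0.0.0'}}}
    print(default(cfg, '/configurations/cluster-env/rpm_version'))  # 3.0.0.0
    print(default(cfg, '/commandParams/flume_handler'))             # None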

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2
deleted file mode 100644
index 70e495c..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-# flume.conf: Add your flume configuration here and start flume
-#             Note if you are using the Windows service or Unix service
-#             provided by the PHD distribution, they will assume the
-#             agent's name in this file to be 'a1'
-#
-{{flume_agent_conf_content}}
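
The deleted template above only interpolates one variable. For reference, a bare-bones render of such a template with plain jinja2 (Ambari's Template wrapper resolves these names against the params module; this sketch just assumes the jinja2 library is available):

    from jinja2 import Template

    tpl = Template('# flume.conf: generated\n{{flume_agent_conf_content}}\n')
    print(tpl.render(flume_agent_conf_content='a1.sources = r1'))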


http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json
deleted file mode 100644
index 526a328..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json
+++ /dev/null
@@ -1,5354 +0,0 @@
-{
-  "NODEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsFailed": {
-              "metric": "mapred.ShuffleOutputsFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.metrics.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersCompleted": {
-              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersKilled": {
-              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedGB": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsOK": {
-              "metric": "mapred.ShuffleOutputsOK",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersFailed": {
-              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedContainers": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersRunning": {
-              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersLaunched": {
-              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableGB": {
-              "metric": "yarn.NodeManagerMetrics.AvailableGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleConnections": {
-              "metric": "mapred.ShuffleConnections",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersIniting": {
-              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputBytes": {
-              "metric": "mapred.ShuffleOutputBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.metrics.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsFailed": {
-              "metric": "mapred.ShuffleOutputsFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.metrics.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersCompleted": {
-              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersKilled": {
-              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedGB": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsOK": {
-              "metric": "mapred.ShuffleOutputsOK",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersFailed": {
-              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedContainers": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersRunning": {
-              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersLaunched": {
-              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableGB": {
-              "metric": "yarn.NodeManagerMetrics.AvailableGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleConnections": {
-              "metric": "mapred.ShuffleConnections",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersIniting": {
-              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputBytes": {
-              "metric": "mapred.ShuffleOutputBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.metrics.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logError": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ]
-  },
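
(For context: every definition in the file being removed follows one schema. The JSON key is the path under which Ambari's REST API exposes the metric, "metric" names the underlying ganglia or JMX source, "pointInTime" marks it readable as a single current value, and "temporal" marks it queryable as a time series. A minimal Python sketch of reading one such definition; the sample entry is copied from the NODEMANAGER "jmx" section above, while the loop itself is illustrative only and not Ambari code:)

    import json

    # One definition copied verbatim from the NODEMANAGER "jmx" section.
    definition = json.loads('''
    {
      "metrics/jvm/gcCount": {
        "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
        "pointInTime": true,
        "temporal": false
      }
    }
    ''')

    for api_path, spec in definition.items():
        # pointInTime -> current-value reads; temporal -> time-series reads.
        modes = [k for k in ("pointInTime", "temporal") if spec[k]]
        print("%s <- %s (%s)" % (api_path, spec["metric"], ", ".join(modes)))
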
-  "RESOURCEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumLostNMs": {
-              "metric": "yarn.ClusterMetrics.NumLostNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-              "metric": "yarn.ClusterMetrics.NumActiveNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-              "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-              "pointInTime": false,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/runtime/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumLostNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedContainers": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedContainers",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingContainers": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingContainers",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memMaxM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/unhealthyNMcount": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedVCores": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedVCores",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/decommissionedNMcount": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/startTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveApplications": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveApplications",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableMB": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableMB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/nodeManagers": {
-              "metric": "Hadoop:service=ResourceManager,name=RMNMInfo.LiveNodeManagers",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedVCores": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedVCores",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsPending": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsPending",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsCompleted": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsCompleted",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveUsers": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveUsers",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsRunning": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_1440": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_1440",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableVCores": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableVCores",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedMB": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedMB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logError": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingMB": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingMB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/activeNMcount": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersReleased": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersReleased",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/lostNMcount": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedMB": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedMB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingVCores": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingVCores",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ],
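
(The queue entries above use a small templating scheme rather than fixed paths: the "(.+)" capture in the source metric grabs the queue portion of the name, and the "$1.replaceAll(...)" expression embedded in the key rewrites that capture into path segments. For ganglia metrics the queue arrives dot-separated, e.g. "root.default", so dots become slashes; for JMX beans it arrives as ",q0=root,q1=default", so each ",qN=" becomes a slash and ".substring(1)" drops the leading one. A sketch of both rewrites in Python, with the regexes and function names chosen here purely for illustration:)

    import re

    def ganglia_queue_key(source_metric):
        # e.g. "yarn.QueueMetrics.Queue=root.default.AppsCompleted"
        m = re.match(r"yarn\.QueueMetrics\.Queue=(.+)\.AppsCompleted", source_metric)
        if m is None:
            return None
        queue = m.group(1).replace(".", "/")           # $1.replaceAll("([.])","/")
        return "metrics/yarn/Queue/%s/AppsCompleted" % queue

    def jmx_queue_key(bean_attribute):
        # e.g. "Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=default.AppsCompleted"
        m = re.match(r".*name=QueueMetrics(.+)\.AppsCompleted", bean_attribute)
        if m is None:
            return None
        queue = re.sub(r",q(\d+)=", "/", m.group(1))   # replaceAll(",q(\\d+)=","/")
        return "metrics/yarn/Queue/%s/AppsCompleted" % queue[1:]   # .substring(1)

    # Both variants resolve to the same API path:
    # metrics/yarn/Queue/root/default/AppsCompleted
    print(ganglia_queue_key("yarn.QueueMetrics.Queue=root.default.AppsCompleted"))
    print(jmx_queue_key("Hadoop:service=ResourceManager,"
                        "name=QueueMetrics,q0=root,q1=default.AppsCompleted"))
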
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumLostNMs": {
-              "metric": "yarn.ClusterMetrics.NumLostNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-              "metric": "yarn.ClusterMetrics.NumActiveNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-              "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_num": {
-              "metric": "cpu_num",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-              "pointInTime": false,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-              "pointInTime": true,

<TRUNCATED>
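
(Editor's note on the metric mappings above: in each entry the "metric" value is a regular expression, and its capture group feeds the template embedded in the property key. For the queue metrics, "yarn.QueueMetrics.Queue=(.+).AvailableMB" captures the dotted queue path and $1.replaceAll("([.])","/") rewrites the dots to slashes, so a source metric like yarn.QueueMetrics.Queue=root.default.AvailableMB would presumably surface as metrics/yarn/Queue/root/default/AvailableMB. The JMX variant captures a bean-name suffix such as ,q0=root,q1=default, replaces each ",qN=" with "/", and drops the leading slash via substring(1), yielding the same root/default path segment.)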

[11/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
deleted file mode 100644
index 812b897..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
+++ /dev/null
@@ -1,718 +0,0 @@
--- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE SEQUENCE_TABLE
-(
-   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
-   NEXT_VAL NUMBER NOT NULL
-);
-
-ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
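-- [Editorial sketch; not part of the deleted file.] SEQUENCE_TABLE backs
-- DataNucleus' table-based ID generation: each persistable class reads its
-- row and bumps NEXT_VAL to reserve a block of identifiers, roughly:
--
--   SELECT NEXT_VAL FROM SEQUENCE_TABLE
--     WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable';
--   UPDATE SEQUENCE_TABLE SET NEXT_VAL = NEXT_VAL + 10
--     WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable';
--
-- (The sequence name and the block size of 10 are illustrative assumptions.)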
-
--- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
--- This table is required if datanucleus.autoStartMechanism=SchemaTable
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE NUCLEUS_TABLES
-(
-   CLASS_NAME VARCHAR2(128) NOT NULL,
-   TABLE_NAME VARCHAR2(128) NOT NULL,
-   TYPE VARCHAR2(4) NOT NULL,
-   OWNER VARCHAR2(2) NOT NULL,
-   VERSION VARCHAR2(20) NOT NULL,
-   INTERFACE_NAME VARCHAR2(255) NULL
-);
-
-ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_COL_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table CDS.
-CREATE TABLE CDS
-(
-    CD_ID NUMBER NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
-    TYPE_NAME VARCHAR2(4000) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID NUMBER NOT NULL,
-    PART_KEY_VAL VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID NUMBER NOT NULL,
-    "DESC" VARCHAR2(4000) NULL,
-    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
-    "NAME" VARCHAR2(128) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID NUMBER NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID NUMBER NOT NULL,
-    TYPE_NAME VARCHAR2(128) NULL,
-    TYPE1 VARCHAR2(767) NULL,
-    TYPE2 VARCHAR2(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID NUMBER NOT NULL,
-    PKEY_COMMENT VARCHAR2(4000) NULL,
-    PKEY_NAME VARCHAR2(128) NOT NULL,
-    PKEY_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    ROLE_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    PART_NAME VARCHAR2(767) NULL,
-    SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_COL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
-    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
-    INDEX_NAME VARCHAR2(128) NULL,
-    INDEX_TBL_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    ORIG_TBL_ID NUMBER NULL,
-    SD_ID NUMBER NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    BUCKET_COL_NAME VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    FIELD_NAME VARCHAR2(128) NOT NULL,
-    FIELD_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    USER_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID NUMBER NOT NULL,
-    CD_ID NUMBER NULL,
-    INPUT_FORMAT VARCHAR2(4000) NULL,
-    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
-    LOCATION VARCHAR2(4000) NULL,
-    NUM_BUCKETS NUMBER (10) NOT NULL,
-    OUTPUT_FORMAT VARCHAR2(4000) NULL,
-    SERDE_ID NUMBER NULL,
-    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    "ORDER" NUMBER (10) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(180) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID NUMBER NOT NULL,
-    ADD_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    ROLE_ID NUMBER NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    DB_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    OWNER VARCHAR2(767) NULL,
-    RETENTION NUMBER (10) NOT NULL,
-    SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(128) NULL,
-    TBL_TYPE VARCHAR2(128) NULL,
-    VIEW_EXPANDED_TEXT CLOB NULL,
-    VIEW_ORIGINAL_TEXT CLOB NULL
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID NUMBER NOT NULL,
-    DB_NAME VARCHAR2(128) NULL,
-    EVENT_TIME NUMBER NOT NULL,
-    EVENT_TYPE NUMBER (10) NOT NULL,
-    PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID NUMBER NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID NUMBER NOT NULL,
-    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
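-- [Editorial sketch; not part of the deleted file.] The foreign keys in this
-- schema are declared INITIALLY DEFERRED, so Oracle validates them only at
-- commit time; within a transaction, child rows may be written before their
-- parents, for example:
--
--   INSERT INTO SKEWED_STRING_LIST_VALUES (STRING_LIST_ID, STRING_LIST_VALUE, INTEGER_IDX)
--     VALUES (1, 'v1', 0);                                      -- child first
--   INSERT INTO SKEWED_STRING_LIST (STRING_LIST_ID) VALUES (1); -- parent second
--   COMMIT;                                    -- deferred FK is checked here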
-
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID NUMBER NOT NULL,
-    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID NUMBER NOT NULL,
-    STRING_LIST_ID_KID NUMBER NOT NULL,
-    "LOCATION" VARCHAR2(4000) NULL
-);
-
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID NUMBER (10) NOT NULL,
-    MASTER_KEY VARCHAR2(767) NULL
-);
-
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT VARCHAR2(767) NOT NULL,
-    TOKEN VARCHAR2(767) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID NUMBER NOT NULL,
-    STRING_LIST_ID_EID NUMBER NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
--- column statistics
-
-CREATE TABLE TAB_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL, 
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- TBL_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-CREATE TABLE VERSION (
-  VER_ID NUMBER NOT NULL,
-  SCHEMA_VERSION VARCHAR(127) NOT NULL,
-  VERSION_COMMENT VARCHAR(255)
-);
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE TABLE PART_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- PART_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
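(Editor's note, not part of the deleted Oracle script: the file ends by seeding the VERSION table with '0.12.0'. Hive's schema tooling reads this row to verify that the metastore database matches the release; a minimal sketch of such a check, with the exact query being an assumption, is:)

SELECT SCHEMA_VERSION FROM VERSION;
-- returns '0.12.0' for a metastore initialized from the script above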

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
deleted file mode 100644
index bc6486b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
+++ /dev/null
@@ -1,1406 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-    "CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_OLD" (
-    "SD_ID" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-    "CD_ID" bigint NOT NULL,
-    "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000),
-    "INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-    "DB_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(180) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-    "DB_ID" bigint NOT NULL,
-    "DESC" character varying(4000) DEFAULT NULL::character varying,
-    "DB_LOCATION_URI" character varying(4000) NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-    "DB_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-    "USER_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-    "INDEX_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DEFERRED_REBUILD" boolean NOT NULL,
-    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
-    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-    "INDEX_TBL_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "ORIG_TBL_ID" bigint,
-    "SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-    "INDEX_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-    "CLASS_NAME" character varying(128) NOT NULL,
-    "TABLE_NAME" character varying(128) NOT NULL,
-    "TYPE" character varying(4) NOT NULL,
-    "OWNER" character varying(2) NOT NULL,
-    "VERSION" character varying(20) NOT NULL,
-    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITIONS" (
-    "PART_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
-    "SD_ID" bigint,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_EVENTS" (
-    "PART_NAME_ID" bigint NOT NULL,
-    "DB_NAME" character varying(128),
-    "EVENT_TIME" bigint NOT NULL,
-    "EVENT_TYPE" integer NOT NULL,
-    "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(128)
-);
-
-
---
--- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEYS" (
-    "TBL_ID" bigint NOT NULL,
-    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
-    "PKEY_NAME" character varying(128) NOT NULL,
-    "PKEY_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEY_VALS" (
-    "PART_ID" bigint NOT NULL,
-    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_PARAMS" (
-    "PART_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_PRIVS" (
-    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_PRIVS" (
-    "PART_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLES" (
-    "ROLE_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLE_MAP" (
-    "ROLE_GRANT_ID" bigint NOT NULL,
-    "ADD_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_ID" bigint
-);
-
-
---
--- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SDS" (
-    "SD_ID" bigint NOT NULL,
-    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "IS_COMPRESSED" boolean NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
-    "NUM_BUCKETS" bigint NOT NULL,
-    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "SERDE_ID" bigint,
-    "CD_ID" bigint,
-    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
-);
-
-
---
--- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SD_PARAMS" (
-    "SD_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SEQUENCE_TABLE" (
-    "SEQUENCE_NAME" character varying(255) NOT NULL,
-    "NEXT_VAL" bigint NOT NULL
-);
-
-
---
--- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDES" (
-    "SERDE_ID" bigint NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDE_PARAMS" (
-    "SERDE_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SORT_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ORDER" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TABLE_PARAMS" (
-    "TBL_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBLS" (
-    "TBL_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "OWNER" character varying(767) DEFAULT NULL::character varying,
-    "RETENTION" bigint NOT NULL,
-    "SD_ID" bigint,
-    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "VIEW_EXPANDED_TEXT" text,
-    "VIEW_ORIGINAL_TEXT" text
-);
-
-
---
--- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_COL_PRIVS" (
-    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_PRIVS" (
-    "TBL_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPES" (
-    "TYPES_ID" bigint NOT NULL,
-    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TYPE1" character varying(767) DEFAULT NULL::character varying,
-    "TYPE2" character varying(767) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPE_FIELDS" (
-    "TYPE_NAME" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "FIELD_NAME" character varying(128) NOT NULL,
-    "FIELD_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST" (
-    "STRING_LIST_ID" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
-    "STRING_LIST_ID" bigint NOT NULL,
-    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_NAMES" (
-    "SD_ID" bigint NOT NULL,
-    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
-    "SD_ID" bigint NOT NULL,
-    "STRING_LIST_ID_KID" bigint NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying
-);
-
-CREATE TABLE "SKEWED_VALUES" (
-    "SD_ID_OID" bigint NOT NULL,
-    "STRING_LIST_ID_EID" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE  "MASTER_KEYS"
-(
-    "KEY_ID" SERIAL,
-    "MASTER_KEY" varchar(767) NULL,
-    PRIMARY KEY ("KEY_ID")
-);
-
-CREATE TABLE  "DELEGATION_TOKENS"
-(
-    "TOKEN_IDENT" varchar(767) NOT NULL,
-    "TOKEN" varchar(767) NULL,
-    PRIMARY KEY ("TOKEN_IDENT")
-);
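-- [Editorial note; not part of the deleted file.] Unlike the Oracle variant
-- above, this PostgreSQL script declares KEY_ID as SERIAL (auto-increment)
-- and attaches the primary keys of MASTER_KEYS and DELEGATION_TOKENS inline,
-- so no separate ALTER TABLE ... ADD CONSTRAINT statements follow for them.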
-
-CREATE TABLE "TAB_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "TBL_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for VERSION
---
-CREATE TABLE "VERSION" (
-  "VER_ID" bigint,
-  "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL
-);
-
---
--- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "PART_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "CDS"
-    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
-
-
---
--- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-
---
--- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-
---
--- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-
---
--- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
-
-
---
--- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
-
-
---
--- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
-
-
---
--- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
-
-
---
--- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-
---
--- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "NUCLEUS_TABLES"
-    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
-
-
---
--- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
-
-
---
--- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_EVENTS"
-    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
-
-
---
--- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-
---
--- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-
---
--- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-
---
--- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-
---
--- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
-
-
---
--- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
-
-
---
--- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
-
-
---
--- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
-
-
---
--- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
-
-
---
--- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-
---
--- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SEQUENCE_TABLE"
-    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
-
-
---
--- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDES"
-    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
-
-
---
--- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-
---
--- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-
---
--- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
-
-
---
--- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-
---
--- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
-
-
---
--- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
-
-
---
--- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
---
--- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
-
-
---
--- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
-
-
---
--- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
-
-
---
--- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
-
-
---
--- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
-
-
---
--- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
-
-
---
--- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
-
-
---
--- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
-
-
---
--- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
-
-
---
--- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
-
-
---
--- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
-
-
---
--- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
-
-
---
--- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
-
-
---
--- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
-
-
---
--- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
-
-
---
--- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
-
-
---
--- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
-
-
---
--- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
-
-
---
--- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
-
-
---
--- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
-
-
---
--- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
-
-
---
--- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
-
-
---
--- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
-
-
---
--- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
-
-
---
--- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
-
-
---
--- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
-
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
-
-
---
--- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
-
---
--- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
-
---
--- Name: public; Type: ACL; Schema: -; Owner: hiveuser
---
-
-REVOKE ALL ON SCHEMA public FROM PUBLIC;
-GRANT ALL ON SCHEMA public TO PUBLIC;
-
-
-INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
---
--- PostgreSQL database dump complete
---
-
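
The dump above ends by seeding the VERSION table with schema version 0.12.0,
which is what Hive's schematool and manual upgrade scripts key off. A minimal
sketch of checking that row, assuming the metastore database is named "hive"
and owned by "hiveuser" as in the dump's Owner annotations (both names are
assumptions for any given cluster):

  # Print the recorded Hive metastore schema version; the quoted identifiers
  # are required because the schema uses upper-case table and column names.
  psql -U hiveuser -d hive -t -c 'SELECT "SCHEMA_VERSION" FROM "VERSION";'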

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/metainfo.xml
deleted file mode 100644
index 12f359a..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,280 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HIVE</name>
-      <displayName>Hive</displayName>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.13.1.phd.3.0.0.0</version>
-      <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <displayName>Hive Metastore</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs />
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <displayName>HiveServer2</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs />
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/HIVE_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <displayName>WebHCat Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs>
-            <client>HCAT</client>
-          </clientsToUpdateConfigs>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>POSTGRESQL_SERVER</name>
-          <displayName>PostgreSQL Server</displayName>
-          <category>MASTER</category>
-          <cardinality>0-1</cardinality>
-          <commandScript>
-            <script>scripts/postgresql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MYSQL_SERVER</name>
-          <displayName>MySQL Server</displayName>
-          <category>MASTER</category>
-          <cardinality>0-1</cardinality>
-          <clientsToUpdateConfigs />
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_CLIENT</name>
-          <displayName>Hive Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-env.sh</fileName>
-              <dictionaryName>hive-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-log4j.properties</fileName>
-              <dictionaryName>hive-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-exec-log4j.properties</fileName>
-              <dictionaryName>hive-exec-log4j</dictionaryName>
-            </configFile>                         
-          </configFiles>
-        </component>
-        <component>
-          <name>HCAT</name>
-          <displayName>HCat Client</displayName>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>hcat-env.sh</fileName>
-              <dictionaryName>hcat-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive</name>
-            </package>
-            <package>
-              <name>hive-hcatalog</name>
-            </package>
-            <package>
-              <name>hive-webhcat</name>
-            </package>
-            <package>
-              <name>postgresql-server</name>
-            </package>
-            <package>
-              <name>postgresql-jdbc</name>
-            </package>
-            <package>
-              <name>mysql-connector-java</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>mysql</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>mysql-server</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>mysql-client</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>hive-env</config-type>
-        <config-type>webhcat-site</config-type>
-        <config-type>webhcat-env</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
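
This metainfo.xml wires the Hive service into the stack: component
definitions, per-OS-family package lists, and configuration dependencies. A
quick well-formedness check for files like this one, using xmllint (shipped
with libxml2; its availability on a given build host is an assumption):

  # Fail fast on malformed XML in a stack definition.
  xmllint --noout ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/metainfo.xml \
    && echo "metainfo.xml is well-formed"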

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100644
index bd00c1f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-myhostname=$(hostname -f)
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-# add the user for the local FQDN only if it does not already exist
-if ! mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -q "$mysqldbuser"; then
-  echo "Adding user $mysqldbuser@$myhostname";
-  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
-  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
-fi
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop
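
The script takes the mysqld service name, the metastore database user, that
user's password, and the metastore host, in that order, and grants the user
full privileges from the given host, from localhost, and (if missing) from
the local FQDN. A hypothetical invocation, with all four values as
placeholders rather than stack defaults:

  # service name, db user, db password, db host
  bash addMysqlUser.sh mysqld hive hivepassword db.example.com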

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addPostgreSQLUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addPostgreSQLUser.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addPostgreSQLUser.sh
deleted file mode 100644
index 5860810..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/addPostgreSQLUser.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-postgresqlservice=$1
-postgresqluser=$2
-postgresqlpasswd=$3
-postgresqldb=$4
-postgresqlport=5432
-
-# check if the database has already been created.
-check_cmd="psql -U postgres -p $postgresqlport -c \"select datname from pg_catalog.pg_database where datname = '$postgresqldb';\" | grep \"$postgresqldb\" > /dev/null"
-eval $check_cmd
-if [ $? -eq 0 ]; then
-  echo "The database $postgresqldb has already been created. No change is made to the system."
-  exit 0
-else
-  # We need to create a database here, because postgresql grants privileges on objects.
-  echo "Creating database \"$postgresqldb\""
-  psql -U postgres -p $postgresqlport -c "CREATE DATABASE \"$postgresqldb\";"
-
-  echo "Adding user \"$postgresqluser\""
-  psql -U postgres -p $postgresqlport -c "CREATE USER \"$postgresqluser\" WITH NOSUPERUSER NOCREATEDB NOCREATEROLE PASSWORD '$postgresqlpasswd';"
-  psql -U postgres -p $postgresqlport -c "GRANT ALL PRIVILEGES ON DATABASE \"$postgresqldb\" TO \"$postgresqluser\";"
-fi
-
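
Argument order here is service name, database user, password, and database
name; the service argument is accepted for symmetry with the MySQL script
but never referenced. A hypothetical invocation (all values are
placeholders):

  # service name (unused), db user, db password, db name
  bash addPostgreSQLUser.sh postgresql hive hivepassword hive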

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100644
index d1e2ff1..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac
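
The smoke test is driven in two phases keyed off $2. A hypothetical run,
with the table name chosen by the caller:

  table=hcatsmoke_test
  bash hcatSmoke.sh "$table" prepare   # show tables, drop if present, create RCFile table
  bash hcatSmoke.sh "$table" cleanup   # drop the table again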

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100644
index f9f2020..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive
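
This pipes two statements into the Hive CLI: an idempotent CREATE EXTERNAL
TABLE followed by a DESCRIBE, so a second run against the same table name is
harmless. A hypothetical invocation (the table name is a placeholder):

  bash hiveSmoke.sh hivesmoke_test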

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;
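
This file is the payload for the hiveserver2 smoke test in the next hunk,
which feeds it to beeline via the !run command. A standalone sketch, assuming
a HiveServer2 listening on localhost:10000 (the URL and file path are
assumptions):

  beeline -u jdbc:hive2://localhost:10000 -n fakeuser -p fakepwd \
    -e '!run /tmp/hiveserver2.sql'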

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100644
index 77d7b3e..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=$(/usr/lib/hive/bin/beeline -u "$1" -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error)
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 failed"
-  echo $smokeout
-  exit 1
-fi
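
Here $1 is the HiveServer2 JDBC URL and $2 the SQL file executed through
beeline's !run command; the test passes when beeline's output contains no
line matching "Error". A hypothetical invocation (URL and path are
placeholders):

  bash hiveserver2Smoke.sh "jdbc:hive2://localhost:10000" /tmp/hiveserver2.sql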


[06/23] ambari git commit: AMBARI-12779: [PluggableStackDefinition] Remove ambari-server/src/main/resources/stacks/PHD (jluniya)

Posted by jl...@apache.org.
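
The template removed below renders the main Nagios configuration. Before a
rendered copy is put into service, Nagios can pre-flight it with its built-in
verification mode; a minimal sketch, assuming the conventional install path
(the actual path is set by the packaging, not by this template):

  # Parse and sanity-check the rendered config without starting the daemon.
  nagios -v /etc/nagios/nagios.cfg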
http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index bcff8ac..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1365 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-{% for cfg_file in cfg_files %}
-cfg_file={{cfg_file}}
-{% endfor %}
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file={{conf_dir}}/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file={{conf_dir}}/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file={{conf_dir}}/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file={{conf_dir}}/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir={{conf_dir}}/servers
-#cfg_dir={{conf_dir}}/printers
-#cfg_dir={{conf_dir}}/switches
-#cfg_dir={{conf_dir}}/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can reduce the time needed to (re)start
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (e.g. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# be loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is the directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) which check
-# result files are considered to be valid.  Files older than this 
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option.
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc., but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular intervals, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.  If you
-# want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ANDs of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
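-
-# Example (illustrative, not part of the stock file): with interval_length=60,
-# a host or service check_interval of 5 works out to 5 * 60 = 300 seconds
-# (5 minutes) between regularly scheduled checks.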
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host check is or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# process_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have a distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=0
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# to detect hosts and services that are "flapping".
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
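-
-# Example (illustrative): with the values above, a service whose weighted
-# percent state change climbs above 20.0% is flagged as flapping, and the
-# flapping state is only cleared once that figure drops back below 5.0%.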
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
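-
-# Illustrative use of these macros in a mail-based notification command
-# (a sketch, not part of this file; assumes a standard mail binary):
-#   command_line  /usr/bin/printf "%b" "$HOSTOUTPUT$" | /bin/mail -s "Nagios alert: $HOSTNAME$" $ADMINEMAIL$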
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enabled tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=1
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-# NAGIOS_* macros are required for Ambari Maintenance Mode (mm_wrapper.py)
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processed used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#          0 = Nothing
-#          1 = Functions
-#          2 = Configuration
-#          4 = Process information
-#          8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#          128 = External commands
-#          256 = Commands
-#          512 = Scheduled downtime
-#          1024 = Comments
-#          2048 = Macros
-
-debug_level=0
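-
-# Example (illustrative): OR-ing 16 (host/service checks) with 32
-# (notifications) gives 16 + 32 = 48, so debug_level=48 logs both types.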
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index f415e65..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias {{cgi_weblink}} "{{cgi_dir}}"
-
-<Directory "{{cgi_dir}}">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile {{conf_dir}}/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "{{nagios_web_dir}}"
-{# Ubuntu has different nagios url #}
-{% if os_family == "ubuntu" %}
-Alias /nagios3 "{{nagios_web_dir}}"
-{% endif %}
-
-<Directory "{{nagios_web_dir}}">
-#  SSLRequireSSL
-   Options FollowSymLinks
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile {{conf_dir}}/htpasswd.users
-   Require valid-user
-</Directory>
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index 0927915..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,164 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="{{conf_dir}}/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
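-        # Note: $nice and $corelimit are not set in this script itself; if
-        # needed, they are expected to come from /etc/sysconfig/$prog,
-        # which is sourced above.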
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 291d90f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-{#
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-#}
-
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
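-
-# Illustrative use of $USER1$ in a command definition (a sketch that would
-# live in a commands config file, not here; check_disk is a stock plugin):
-#   define command{
-#       command_name  check_local_disk
-#       command_line  $USER1$/check_disk -w $ARG1$ -c $ARG2$ -p $ARG3$
-#       }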
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-env.xml
deleted file mode 100644
index 88f5a1b..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-env.xml
+++ /dev/null
@@ -1,129 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <property-type>USER</property-type>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value>New Derby Database</value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-  <property>
-    <name>oozie_admin_port</name>
-    <value>11001</value>
-    <description>The admin port Oozie server runs.</description>
-  </property>
-
-  <!-- oozie-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for the oozie-env.sh file</description>
-    <value>
-#!/bin/bash
-
-if [ -d "/usr/lib/phd-tomcat" ]; then
-  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}
-  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}
-  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
-  export OOZIE_CATALINA_HOME=/usr/lib/phd-tomcat
-fi
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-
-export JRE_HOME=${JAVA_HOME}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-export OOZIE_HTTP_PORT={{oozie_server_port}}
-
-# The admin port Oozie server runs
-#
-export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-log4j.xml
deleted file mode 100644
index 7f7158f..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-log4j.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 83ca880..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,312 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration supports_final="true">
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.security.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled, any user can manage the Oozie system and any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means infinite timeout
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie DataBase Name
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
-      #AUTHENTICATION_HANDLER_CLASSNAME#.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
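-  <!-- Illustrative job.properties snippet (a sketch, not part of this file;
-       the application path is a placeholder) showing how a workflow opts in:
-         oozie.use.system.libpath=true
-         oozie.wf.application.path=hdfs:///user/<user>/myapp
-  -->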
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>
-
-
-
-
-
-    </value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
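-  <!-- Illustrative rules (a sketch; the realm and principal are placeholders)
-       using the Hadoop auth_to_local syntax:
-         RULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/
-         DEFAULT
-  -->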
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-      the Oozie configuration directory; the path can also be absolute (i.e. pointing
-      to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
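-  <!-- Illustrative multi-authority value (hostname and paths are placeholders):
-       jt.example.com:8021=/etc/hadoop/conf.jt,*=/etc/hadoop/conf
-  -->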
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>
-      org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor
-    </value>
-    <description>
-      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
-      be used in workflows. This property is a convenience property to add extensions to the built in executors without
-      having to include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
-    <description>
-      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a 1-space string; the service
-      trims the value, and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates the Oozie DB.
-
-      If set to true, the DB schema is created if it does not exist; if it already exists, this is a no-op.
-      If set to false, the DB schema is not created, and startup fails if the schema does not exist.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      Database user name used to connect to the database.
-    </description>
-  </property>
-
-  <property require-input = "true">
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value> </value>
-    <property-type>PASSWORD</property-type>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-      and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
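
The Derby settings above suit an embedded, single-node database. Pointing Oozie at an
external database amounts to swapping the four JPAService JDBC properties; a commonly
used MySQL variant (host and password are placeholders) would look roughly like:

    oozie.service.JPAService.jdbc.driver   = com.mysql.jdbc.Driver
    oozie.service.JPAService.jdbc.url      = jdbc:mysql://db.example.com:3306/oozie
    oozie.service.JPAService.jdbc.username = oozie
    oozie.service.JPAService.jdbc.password = <password>

(The metainfo.xml further down installs mysql-connector-java, which provides that
driver class.)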
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.services</name>
-    <value>
-      org.apache.oozie.service.SchedulerService,
-      org.apache.oozie.service.InstrumentationService,
-      org.apache.oozie.service.CallableQueueService,
-      org.apache.oozie.service.UUIDService,
-      org.apache.oozie.service.ELService,
-      org.apache.oozie.service.AuthorizationService,
-      org.apache.oozie.service.UserGroupInformationService,
-      org.apache.oozie.service.HadoopAccessorService,
-      org.apache.oozie.service.URIHandlerService,
-      org.apache.oozie.service.MemoryLocksService,
-      org.apache.oozie.service.DagXLogInfoService,
-      org.apache.oozie.service.SchemaService,
-      org.apache.oozie.service.LiteWorkflowAppService,
-      org.apache.oozie.service.JPAService,
-      org.apache.oozie.service.StoreService,
-      org.apache.oozie.service.CoordinatorStoreService,
-      org.apache.oozie.service.SLAStoreService,
-      org.apache.oozie.service.DBLiteWorkflowStoreService,
-      org.apache.oozie.service.CallbackService,
-      org.apache.oozie.service.ActionService,
-      org.apache.oozie.service.ActionCheckerService,
-      org.apache.oozie.service.RecoveryService,
-      org.apache.oozie.service.PurgeService,
-      org.apache.oozie.service.CoordinatorEngineService,
-      org.apache.oozie.service.BundleEngineService,
-      org.apache.oozie.service.DagEngineService,
-      org.apache.oozie.service.CoordMaterializeTriggerService,
-      org.apache.oozie.service.StatusTransitService,
-      org.apache.oozie.service.PauseTransitService,
-      org.apache.oozie.service.GroupsService,
-      org.apache.oozie.service.ProxyUserService
-    </value>
-    <description>List of Oozie services</description>
-  </property>
-  <property>
-    <name>oozie.service.URIHandlerService.uri.handlers</name>
-    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-    <description>
-      Lists the URI handlers supported for data availability checks.
-    </description>
-  </property>
-  <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
-    </value>
-    <description>
-      To add/replace services defined in 'oozie.services' with custom implementations.
-      Class names must be separated by commas.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-    <description>
-      Command re-queue interval for push dependencies (in milliseconds).
-    </description>
-  </property>
-  <property>
-    <name>oozie.credentials.credentialclasses</name>
-    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-    <description>
-      Credential class to be used for HCat.
-    </description>
-  </property>
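
A workflow consumes the registered credential class by declaring a <credentials> block
and referencing it from an action via the cred attribute; a sketch of that wiring
(metastore URI and principal are placeholders):

    <credentials>
      <credential name="hcat-cred" type="hcat">
        <property>
          <name>hcat.metastore.uri</name>
          <value>thrift://metastore.example.com:9083</value>
        </property>
        <property>
          <name>hcat.metastore.principal</name>
          <value>hive/_HOST@EXAMPLE.COM</value>
        </property>
      </credential>
    </credentials>
    ...
    <action name="hive-query" cred="hcat-cred"> ... </action>

Note that the type attribute ('hcat') matches the key registered in
oozie.credentials.credentialclasses above.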
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/930d4499/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/metainfo.xml
deleted file mode 100644
index 0f388f6..0000000
--- a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,154 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>OOZIE</name>
-      <displayName>Oozie</displayName>
-      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>4.0.1.phd.3.0.0.0</version>
-      <components>
-        <component>
-          <name>OOZIE_SERVER</name>
-          <displayName>Oozie Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>OOZIE_CLIENT</name>
-          <displayName>Oozie Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>oozie-site.xml</fileName>
-              <dictionaryName>oozie-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>oozie-env.sh</fileName>
-              <dictionaryName>oozie-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>oozie-log4j.properties</fileName>
-              <dictionaryName>oozie-log4j</dictionaryName>
-            </configFile>            
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>oozie</name>
-            </package>
-            <package>
-              <name>oozie-client</name>
-            </package>
-            <package>
-              <name>mysql-connector-java</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>libxml2-utils</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>oozie-site</config-type>
-        <config-type>oozie-env</config-type>
-        <config-type>oozie-log4j</config-type>
-        <config-type>yarn-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
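
Each <commandScript> above points at a Python module that the Ambari agent executes
for lifecycle commands. As a rough sketch of the shape such a script takes in this
codebase (method bodies are simplified placeholders; the real scripts/oozie_server.py
does considerably more):

    from resource_management import *

    class OozieServer(Script):
      def install(self, env):
        # install the packages declared under <osSpecifics>
        self.install_packages(env)

      def configure(self, env):
        import params
        env.set_params(params)
        # render oozie-site.xml, oozie-env.sh, oozie-log4j.properties here

      def start(self, env):
        self.configure(env)
        # Execute(...) the oozie start command as the service user

      def stop(self, env):
        import params
        env.set_params(params)
        # Execute(...) the oozie stop command

      def status(self, env):
        import status_params
        # check_process_status(status_params.pid_file)

    if __name__ == "__main__":
      OozieServer().execute()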