Posted to commits@ambari.apache.org by eb...@apache.org on 2014/09/23 22:38:42 UTC

[1/7] AMBARI-7451 Fix 2.0.6.GlusterFS Stack on latest 1.7 build - refactor to make compliant with new architecture (Scott Creeley via eboyd)

Repository: ambari
Updated Branches:
  refs/heads/trunk 0680d1e5c -> 3f7fdf501


http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/files/validateYarnComponentStatus.py
new file mode 100644
index 0000000..862b4c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/files/validateYarnComponentStatus.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+# Return the response for the given path and address
+def getResponse(path, address, ssl_enabled):
+
+  command = "curl"
+  httpGssnegotiate = "--negotiate"
+  userpswd = "-u:"
+  insecure = "-k"  # This is a smoke test; no need to verify the server's CA
+  if ssl_enabled:
+    url = 'https://' + address + path
+  else:
+    url = 'http://' + address + path
+
+  command_with_flags = [command, httpGssnegotiate, userpswd, insecure, url]
+
+  proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  (stdout, stderr) = proc.communicate()
+  response = json.loads(stdout)
+  if response is None:
+    print 'There is no response for url: ' + str(url)
+    raise Exception('There is no response for url: ' + str(url))
+  return response
+
+# Verify that the REST API is available for the given component
+def validateAvailability(component, path, addresses, ssl_enabled):
+  responses = {}
+  for address in addresses.split(','):
+    try:
+      responses[address] = getResponse(path, address, ssl_enabled)
+    except Exception as e:
+      print 'Error checking availability status of component.', e
+
+  if not responses:
+    exit(1)
+
+  is_valid = validateAvailabilityResponse(component, responses.values()[0])
+  if not is_valid:
+    exit(1)
+
+# Validate the component-specific availability response
+def validateAvailabilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      rm_state = response['clusterInfo']['state']
+      if rm_state == STARTED_STATE:
+        return True
+      else:
+        print 'Resourcemanager is not started'
+        return False
+
+    elif component == NODEMANAGER:
+      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+      if node_healthy:
+        return True
+      else:
+        return False
+    elif component == HISTORYSERVER:
+      hs_start_time = response['historyInfo']['startedOn']
+      if hs_start_time > 0:
+        return True
+      else:
+        return False
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating availability response for ' + str(component), e
+    return False
+
+# Verify that the component has the resources required to work
+def validateAbility(component, path, addresses, ssl_enabled):
+  responses = {}
+  for address in addresses.split(','):
+    try:
+      responses[address] = getResponse(path, address, ssl_enabled)
+    except Exception as e:
+      print 'Error checking ability of component.', e
+
+  if not responses:
+    exit(1)
+
+  is_valid = validateAbilityResponse(component, responses.values()[0])
+  if not is_valid:
+    exit(1)
+
+# Validate the component-specific response to confirm it has the resources required to work
+def validateAbilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      nodes = []
+      if 'nodes' in response and response['nodes'] is not None and 'node' in response['nodes']:
+        nodes = response['nodes']['node']
+      connected_nodes_count = len(nodes)
+      if connected_nodes_count == 0:
+        print 'There are no nodemanagers connected to the resourcemanager'
+        return False
+      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+      active_nodes_count = len(active_nodes)
+
+      if active_nodes_count == 0:
+        print 'There are no active nodemanagers connected to the resourcemanager'
+        return False
+      else:
+        return True
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating ability response', e
+    return False
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+  (options, args) = parser.parse_args()
+
+  if not args:
+    parser.error("component argument is required (rm, nm or hs)")
+  component = args[0]
+
+  address = options.address
+  ssl_enabled = (options.ssl_enabled == 'true')
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/info'
+  elif component == NODEMANAGER:
+    path = '/ws/v1/node/info'
+  elif component == HISTORYSERVER:
+    path = '/ws/v1/history/info'
+  else:
+    parser.error("Invalid component")
+
+  validateAvailability(component, path, address, ssl_enabled)
+
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/nodes'
+    validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+  main()
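
For reference, a hypothetical invocation of this smoke test against a ResourceManager
(the host and port below are placeholders, not values from this patch):

  python validateYarnComponentStatus.py rm -p c6401.ambari.apache.org:8088 -s false

The script exits with status 1 when no REST response is received or when the
component-specific validation fails.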

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/__init__.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/application_timeline_server.py
new file mode 100644
index 0000000..0719da8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/application_timeline_server.py
@@ -0,0 +1,55 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from yarn import yarn
+from service import service
+
+class ApplicationTimelineServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    #self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('historyserver', action='start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    service('historyserver', action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.yarn_historyserver_pid_file)
+
+if __name__ == "__main__":
+  ApplicationTimelineServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/historyserver.py
new file mode 100644
index 0000000..d5c6db2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/historyserver.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Historyserver(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="historyserver")
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('historyserver', action='start', serviceName='mapreduce')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    service('historyserver', action='stop', serviceName='mapreduce')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.mapred_historyserver_pid_file)
+
+if __name__ == "__main__":
+  Historyserver().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapred_service_check.py
new file mode 100644
index 0000000..4685716
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapred_service_check.py
@@ -0,0 +1,73 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class MapReduce2ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
+    input_file = format("/user/{smokeuser}/mapredsmokeinput")
+    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
+
+    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
+    create_file_cmd = format("fs -put /etc/passwd {input_file}")
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+    ExecuteHadoop(cleanup_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(create_file_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir,
+                  logoutput=True
+    )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+if __name__ == "__main__":
+  MapReduce2ServiceCheck().execute()
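
ExecuteHadoop runs each command string above as a `hadoop` CLI subcommand, so the
smoke test is roughly equivalent to the following manual sequence (the smoke user
ambari-qa and the paths are assumptions for illustration; the real values are
resolved from params.py at runtime):

  hadoop --config /etc/hadoop/conf fs -rm -r -f /user/ambari-qa/mapredsmokeoutput /user/ambari-qa/mapredsmokeinput
  hadoop --config /etc/hadoop/conf fs -put /etc/passwd /user/ambari-qa/mapredsmokeinput
  hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples-2.*.jar wordcount \
      /user/ambari-qa/mapredsmokeinput /user/ambari-qa/mapredsmokeoutput
  hadoop --config /etc/hadoop/conf fs -test -e /user/ambari-qa/mapredsmokeoutput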

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapreduce2_client.py
new file mode 100644
index 0000000..831e5e8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/mapreduce2_client.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class MapReduce2Client(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  MapReduce2Client().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/nodemanager.py
new file mode 100644
index 0000000..8e153e0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Nodemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="nodemanager")
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('nodemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nodemanager_pid_file)
+
+if __name__ == "__main__":
+  Nodemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
new file mode 100644
index 0000000..0fdaf18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
@@ -0,0 +1,143 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+config_dir = "/etc/hadoop/conf"
+
+mapred_user = status_params.mapred_user
+yarn_user = status_params.yarn_user
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+rm_hosts = config['clusterHostInfo']['rm_host']
+rm_host = rm_hosts[0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+rm_nodes_exclude_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
+
+java64_home = config['hostLevelParams']['java_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
+hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
+apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
+ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
+mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
+mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
+mapred_env_sh_template = config['configurations']['mapred-env']['content']
+yarn_env_sh_template = config['configurations']['yarn-env']['content']
+
+if len(rm_hosts) > 1:
+  additional_rm_host = rm_hosts[1]
+  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
+else:
+  rm_webui_address = format("{rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+
+nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
+nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
+
+
+hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+yarn_pid_dir = status_params.yarn_pid_dir
+mapred_pid_dir = status_params.mapred_pid_dir
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+
+user_group = config['configurations']['cluster-env']['user_group']
+limits_conf_dir = "/etc/security/limits.d"
+hadoop_conf_dir = "/etc/hadoop/conf"
+yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+
+#exclude file
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
+
+hostname = config['hostname']
+
+if security_enabled:
+  nm_principal_name = config['configurations']['yarn-site']['nodemanager_principal_name']
+  nodemanager_keytab = config['configurations']['yarn-site']['nodemanager_keytab']
+  nodemanager_principal_name = nm_principal_name.replace('_HOST',hostname.lower())
+  nm_kinit_cmd = format("{kinit_path_local} -kt {nodemanager_keytab} {nodemanager_principal_name};")
+else:
+  nm_kinit_cmd = ""
+
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
+
+#for create_hdfs_directory
+hostname = config["hostname"]
+hadoop_conf_dir = "/etc/hadoop/conf"
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+import functools
+# Create a partial function with common arguments for every HdfsDirectory call;
+# code then creates HDFS directories by calling params.HdfsDirectory.
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir=hadoop_conf_dir,
+  hdfs_user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local
+)
+update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+
+hadoop_bin = "/usr/lib/hadoop/sbin"
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+min_user_id = config['configurations']['yarn-env']['min_user_id']
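
A note on format() as used throughout these scripts: it is resource_management's
interpolator, not str.format on a literal; it resolves {name} placeholders from the
caller's scope (e.g. {rm_host} above picks up the module-level rm_host). A rough,
hypothetical sketch of the idea, not the actual implementation:

  # Illustrative only -- resolves placeholders from the caller's namespace.
  import inspect

  def format_sketch(template):
      caller = inspect.currentframe().f_back
      names = dict(caller.f_globals)
      names.update(caller.f_locals)
      return template.format(**names)

  rm_host, rm_port = "c6401.ambari.apache.org", "8088"  # hypothetical values
  print(format_sketch("{rm_host}:{rm_port}"))           # c6401.ambari.apache.org:8088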

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
new file mode 100644
index 0000000..a286ae3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,90 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+
+class Resourcemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    yarn(name='resourcemanager')
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('resourcemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.resourcemanager_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    nm_kinit_cmd = params.nm_kinit_cmd
+    yarn_user = params.yarn_user
+    conf_dir = params.config_dir
+    user_group = params.user_group
+
+    yarn_refresh_cmd = format("{nm_kinit_cmd} /usr/bin/yarn --config {conf_dir} rmadmin -refreshNodes")
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         group=user_group
+    )
+
+    if params.update_exclude_file_only == False:
+      Execute(yarn_refresh_cmd,
+            user=yarn_user)
+      pass
+    pass
+
+
+if __name__ == "__main__":
+  Resourcemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service.py
new file mode 100644
index 0000000..d854565
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service.py
@@ -0,0 +1,62 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(componentName, action='start', serviceName='yarn'):
+
+  import params
+
+  if (serviceName == 'mapreduce' and componentName == 'historyserver'):
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
+    usr = params.mapred_user
+  else:
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
+    usr = params.yarn_user
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd} start {componentName}")
+    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            user=usr,
+            not_if=no_op
+    )
+
+    Execute(no_op,
+            user=usr,
+            not_if=no_op,
+            initial_wait=5
+    )
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {componentName}")
+    Execute(daemon_cmd,
+            user=usr,
+    )
+    rm_pid = format("rm -f {pid_file}")
+    Execute(rm_pid,
+            user=usr
+    )
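
For a concrete sense of what the format() strings above resolve to: starting a
nodemanager with the default paths from params.py would execute roughly the
following as the yarn user, skipped when the pid file already points at a live
process (paths assumed from this patch):

  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && \
    /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf start nodemanager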

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service_check.py
new file mode 100644
index 0000000..2ed67ab
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/service_check.py
@@ -0,0 +1,67 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    run_yarn_check_cmd = "/usr/bin/yarn node -list"
+
+    component_type = 'rm'
+    if params.hadoop_ssl_enabled:
+      component_address = params.rm_webui_https_address
+    else:
+      component_address = params.rm_webui_address
+
+    validateStatusFileName = "validateYarnComponentStatus.py"
+    validateStatusFilePath = format("{tmp_dir}/{validateStatusFileName}")
+    python_executable = sys.executable
+    validateStatusCmd = format("{python_executable} {validateStatusFilePath} {component_type} -p {component_address} -s {hadoop_ssl_enabled}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
+    else:
+      smoke_cmd = validateStatusCmd
+
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName),
+         mode=0755
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            user=params.smokeuser,
+            logoutput=True
+    )
+
+    Execute(run_yarn_check_cmd,
+            user=params.smokeuser
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/status_params.py
new file mode 100644
index 0000000..a3a45be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/status_params.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
+yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn.py
new file mode 100644
index 0000000..48f32d2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn.py
@@ -0,0 +1,163 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def yarn(name=None):
+  import params
+
+
+  if name in ["nodemanager","historyserver"]:
+    if params.yarn_log_aggregation_enabled:
+      params.HdfsDirectory(params.yarn_nm_app_log_dir,
+                           action="create_delayed",
+                           owner=params.yarn_user,
+                           group=params.user_group,
+                           mode=0777,
+                           recursive_chmod=True
+      )
+    params.HdfsDirectory("/mapred",
+                         action="create_delayed",
+                         owner=params.mapred_user
+    )
+    params.HdfsDirectory("/mapred/system",
+                         action="create_delayed",
+                         owner=params.hdfs_user
+    )
+    params.HdfsDirectory(params.mapreduce_jobhistory_intermediate_done_dir,
+                         action="create_delayed",
+                         owner=params.mapred_user,
+                         group=params.user_group,
+                         mode=0777
+    )
+
+    params.HdfsDirectory(params.mapreduce_jobhistory_done_dir,
+                         action="create_delayed",
+                         owner=params.mapred_user,
+                         group=params.user_group,
+                         mode=01777
+    )
+    params.HdfsDirectory(None, action="create")
+
+  Directory([params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory([params.mapred_pid_dir, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            recursive=True
+  )
+  Directory(params.nm_local_dirs.split(','),
+            owner=params.yarn_user,
+            recursive=True
+  )
+  Directory(params.nm_log_dirs.split(','),
+            owner=params.yarn_user,
+            recursive=True
+  )
+  Directory(params.yarn_log_dir_prefix,
+            owner=params.yarn_user,
+            recursive=True
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  if name == 'resourcemanager':
+    File(params.yarn_job_summary_log,
+       owner=params.yarn_user,
+       group=params.user_group
+    )
+
+  File(params.rm_nodes_exclude_path,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(format("{config_dir}/yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=Template('yarn-env.sh.j2')
+  )
+
+  if params.security_enabled:
+    container_executor = format("{yarn_container_bin}/container-executor")
+    File(container_executor,
+         group=params.yarn_executor_container_group,
+         mode=06050
+    )
+
+    File(format("{config_dir}/container-executor.cfg"),
+         group=params.user_group,
+         mode=0644,
+         content=Template('container-executor.cfg.j2')
+    )
+
+
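
The create_delayed/create pairing used above batches directory creation: each
create_delayed call only queues a path, and the final HdfsDirectory(None,
action="create") flushes the whole queue at once. A loose, hypothetical sketch of
the pattern (not the resource_management implementation):

  # Illustrative only: queue paths, then create them in a single flush.
  class DelayedHdfsDirs(object):
      def __init__(self):
          self.pending = []

      def directory(self, path, action):
          if action == "create_delayed":
              self.pending.append(path)
          elif action == "create":
              self.flush()

      def flush(self):
          # One batched invocation instead of one round-trip per directory.
          print("hadoop fs -mkdir -p " + " ".join(self.pending))
          self.pending = []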

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn_client.py
new file mode 100644
index 0000000..f6b4b44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/yarn_client.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class YarnClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  YarnClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/container-executor.cfg.j2
new file mode 100644
index 0000000..99e042b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/container-executor.cfg.j2
@@ -0,0 +1,22 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id=1000
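
Rendered with hypothetical cluster values, the template above produces a plain
key=value file along these lines:

  yarn.nodemanager.local-dirs=/hadoop/yarn/local
  yarn.nodemanager.log-dirs=/hadoop/yarn/log
  yarn.nodemanager.linux-container-executor.group=hadoop
  banned.users=hdfs,yarn,mapred,bin
  min.user.id=1000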

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/mapreduce.conf.j2
new file mode 100644
index 0000000..76caea4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/mapreduce.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}}   - nofile 32768
+{{mapred_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn-env.sh.j2
new file mode 100644
index 0000000..abdb003
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn-env.sh.j2
@@ -0,0 +1,128 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the max Heapsize for the HistoryServer using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1024m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn.conf.j2
new file mode 100644
index 0000000..be89b07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/yarn.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}}   - nofile 32768
+{{yarn_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..f4c5513
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <comment>Centralized service which provides highly reliable distributed
+        coordination.</comment>
+      <version>3.4.5.2.0.6.0</version>
+    </service>
+  </services>
+</metainfo>


[4/7] AMBARI-7451 Fix 2.0.6.GlusterFS Stack on latest 1.7 build - refactor to make compliant with new architecture (Scott Creeley via eboyd)

Posted by eb...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.postgres.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
new file mode 100644
index 0000000..7b886e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
@@ -0,0 +1,1538 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: MASTER_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+--
+-- PostgreSQL database dump complete
+--
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and lock tables
+-- These are not part of the JDO package, so if you regenerate this file you need to manually add the following section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767) DEFAULT NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767) DEFAULT NULL,
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
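+
+-- Illustration only (not part of this schema): NEXT_TXN_ID, NEXT_LOCK_ID and
+-- NEXT_COMPACTION_QUEUE_ID act as single-row counters seeded with 1; the
+-- metastore hands out ids by reading and bumping that row inside a
+-- transaction, roughly:
+--   SELECT NTXN_NEXT FROM NEXT_TXN_ID;
+--   UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1;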
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.13.0', 'Hive release version 0.13.0');
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
new file mode 100644
index 0000000..d08b985
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
@@ -0,0 +1,165 @@
+SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;
+
+-- 15-HIVE-5700.oracle.sql
+-- Normalize the date partition column values as best we can. No schema changes.
+
+CREATE FUNCTION hive13_to_date(date_str IN VARCHAR2)
+RETURN DATE IS
+  dt DATE;
+BEGIN
+  dt := TO_DATE(date_str, 'YYYY-MM-DD');
+  RETURN dt;
+EXCEPTION
+  WHEN others THEN RETURN null;
+END;
+/
+
+MERGE INTO PARTITION_KEY_VALS
+USING (
+  SELECT SRC.PART_ID as IPART_ID, SRC.INTEGER_IDX as IINTEGER_IDX, 
+     NVL(TO_CHAR(hive13_to_date(PART_KEY_VAL),'YYYY-MM-DD'), PART_KEY_VAL) as NORM
+  FROM PARTITION_KEY_VALS SRC
+    INNER JOIN PARTITIONS ON SRC.PART_ID = PARTITIONS.PART_ID
+    INNER JOIN PARTITION_KEYS ON PARTITION_KEYS.TBL_ID = PARTITIONS.TBL_ID
+      AND PARTITION_KEYS.INTEGER_IDX = SRC.INTEGER_IDX AND PARTITION_KEYS.PKEY_TYPE = 'date'
+) ON (IPART_ID = PARTITION_KEY_VALS.PART_ID AND IINTEGER_IDX = PARTITION_KEY_VALS.INTEGER_IDX)
+WHEN MATCHED THEN UPDATE SET PART_KEY_VAL = NORM;
+
+DROP FUNCTION hive13_to_date;
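+
+-- For intuition (example values, not part of the upgrade): a date-typed
+-- partition value such as '2014-1-5' is rewritten by the MERGE above to the
+-- normalized '2014-01-05', while a value TO_DATE cannot parse makes
+-- hive13_to_date return NULL, so the NVL keeps the original PART_KEY_VAL.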
+
+-- 16-HIVE-6386.oracle.sql
+ALTER TABLE DBS ADD OWNER_NAME VARCHAR2(128);
+ALTER TABLE DBS ADD OWNER_TYPE VARCHAR2(10);
+
+-- 17-HIVE-6458.oracle.sql
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+-- 18-HIVE-6757.oracle.sql
+UPDATE SDS
+  SET INPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+WHERE
+  INPUT_FORMAT= 'parquet.hive.DeprecatedParquetInputFormat' or
+  INPUT_FORMAT = 'parquet.hive.MapredParquetInputFormat'
+;
+
+UPDATE SDS
+  SET OUTPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+WHERE
+  OUTPUT_FORMAT = 'parquet.hive.DeprecatedParquetOutputFormat'  or
+  OUTPUT_FORMAT = 'parquet.hive.MapredParquetOutputFormat'
+;
+
+UPDATE SERDES
+  SET SLIB='org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
+WHERE
+  SLIB = 'parquet.hive.serde.ParquetHiveSerDe'
+;
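+
+-- Hypothetical spot-check (not part of the upgrade): after the three UPDATEs
+-- above, no storage descriptors should still reference the old parquet.hive
+-- class names:
+--   SELECT COUNT(*) FROM SDS WHERE INPUT_FORMAT LIKE 'parquet.hive.%';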
+
+-- hive-txn-schema-0.13.0.oracle.sql
+
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the License); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an AS IS BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+-- 
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.13.0.oracle.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.13.0.oracle.sql
new file mode 100644
index 0000000..b34f406
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/upgrade-0.13.0.oracle.sql
@@ -0,0 +1,38 @@
+ALTER TABLE TXNS MODIFY (
+  TXN_ID NUMBER(19),
+  TXN_STARTED NUMBER(19),
+  TXN_LAST_HEARTBEAT NUMBER(19)
+);
+
+ALTER TABLE TXN_COMPONENTS MODIFY (
+  TC_TXNID NUMBER(19)
+);
+
+ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY (
+  CTC_TXNID NUMBER(19)
+);
+
+ALTER TABLE NEXT_TXN_ID MODIFY (
+  NTXN_NEXT NUMBER(19)
+);
+
+ALTER TABLE HIVE_LOCKS MODIFY (
+  HL_LOCK_EXT_ID NUMBER(19),
+  HL_LOCK_INT_ID NUMBER(19),
+  HL_TXNID NUMBER(19),
+  HL_LAST_HEARTBEAT NUMBER(19),
+  HL_ACQUIRED_AT NUMBER(19)
+);
+
+ALTER TABLE NEXT_LOCK_ID MODIFY (
+  NL_NEXT NUMBER(19)
+);
+
+ALTER TABLE COMPACTION_QUEUE MODIFY (
+  CQ_ID NUMBER(19),
+  CQ_START NUMBER(19)
+);
+
+ALTER TABLE NEXT_COMPACTION_QUEUE_ID MODIFY (
+  NCQ_NEXT NUMBER(19)
+);
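+
+-- These MODIFYs widen the transaction/lock id columns to NUMBER(19), which
+-- covers the full range of a 64-bit bigint. A hypothetical verification query
+-- (not part of the script):
+--   SELECT column_name, data_precision FROM user_tab_columns
+--   WHERE table_name = 'TXNS';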

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..f1bbb08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/metainfo.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.12.0.2.0.6.1</version>
+
+      <components>
+        <component>
+          <name>HIVE_SERVER</name>
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive</name>
+            </package>
+            <package>
+              <name>hive-hcatalog</name>
+            </package>
+            <package>
+              <name>hive-webhcat</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
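The osSpecifics blocks above are what tell Ambari which packages to install per OS family. A short sketch, standard library only, of how a metainfo.xml like this one can be inspected (the relative path is illustrative):

# Sketch: print the packages declared per osFamily in a stack metainfo.xml.
import xml.etree.ElementTree as ET

tree = ET.parse('services/HIVE/metainfo.xml')  # illustrative path
for os_spec in tree.iter('osSpecific'):
  family = os_spec.findtext('osFamily')
  names = [pkg.findtext('name') for pkg in os_spec.iter('package')]
  print family + ': ' + ', '.join(names)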

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 0000000..fa23cb5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,313 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+        
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://localhost:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+  </property>
+
+  <property>
+    <name>oozie.system.id</name>
+    <value>oozie-${user.name}</value>
+    <description>
+      The Oozie system ID.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
+      System mode for Oozie at startup.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled, any user can manage the Oozie system and any job.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
+      Jobs older than this value, in days, will be purged by the PurgeService.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
+      Interval at which the purge service will run, in seconds.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
+      Maximum concurrency for a given callable type.
+      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
+      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+      All commands that use action executors (action-start, action-end, action-kill and action-check) use
+      the action type as the callable type.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
+      -1 means infinite timeout
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
+      Oozie database name.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
+      System library path to use for workflow applications.
+      This path is added to workflow applications if their job properties set
+      the property 'oozie.use.system.libpath' to true.
+    </description>
+  </property>
+
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
+      If set to true, submissions of MapReduce and Pig jobs will automatically
+      include the system library path, thus not requiring users to
+      specify where the Pig JAR files are. Instead, the ones from the system
+      library path are used.
+    </description>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>
+      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      DEFAULT
+    </value>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory, though the path can be absolute (i.e. pointing
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>
+      org.apache.oozie.action.email.EmailActionExecutor,
+      org.apache.oozie.action.hadoop.HiveActionExecutor,
+      org.apache.oozie.action.hadoop.ShellActionExecutor,
+      org.apache.oozie.action.hadoop.SqoopActionExecutor,
+      org.apache.oozie.action.hadoop.DistcpActionExecutor
+    </value>
+    <description>
+      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
+      be used in workflows. This property is a convenience property to add extensions to the built-in executors without
+      having to include all the built-in ones.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
+    <description>
+      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a one-space string; the service
+      trims the value, and if it is empty, Configuration assumes it is NULL.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
+      Creates Oozie DB.
+
+      If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
+      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
+      JDBC URL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <description>
+      Database user name to use to connect to the database
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value> </value>
+    <description>
+      DB user password.
+
+      IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
+      and if it is empty, Configuration assumes it is NULL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
+      Max number of connections.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.services</name>
+    <value>
+      org.apache.oozie.service.SchedulerService,
+      org.apache.oozie.service.InstrumentationService,
+      org.apache.oozie.service.CallableQueueService,
+      org.apache.oozie.service.UUIDService,
+      org.apache.oozie.service.ELService,
+      org.apache.oozie.service.AuthorizationService,
+      org.apache.oozie.service.UserGroupInformationService,
+      org.apache.oozie.service.HadoopAccessorService,
+      org.apache.oozie.service.URIHandlerService,
+      org.apache.oozie.service.MemoryLocksService,
+      org.apache.oozie.service.DagXLogInfoService,
+      org.apache.oozie.service.SchemaService,
+      org.apache.oozie.service.LiteWorkflowAppService,
+      org.apache.oozie.service.JPAService,
+      org.apache.oozie.service.StoreService,
+      org.apache.oozie.service.CoordinatorStoreService,
+      org.apache.oozie.service.SLAStoreService,
+      org.apache.oozie.service.DBLiteWorkflowStoreService,
+      org.apache.oozie.service.CallbackService,
+      org.apache.oozie.service.ActionService,
+      org.apache.oozie.service.ActionCheckerService,
+      org.apache.oozie.service.RecoveryService,
+      org.apache.oozie.service.PurgeService,
+      org.apache.oozie.service.CoordinatorEngineService,
+      org.apache.oozie.service.BundleEngineService,
+      org.apache.oozie.service.DagEngineService,
+      org.apache.oozie.service.CoordMaterializeTriggerService,
+      org.apache.oozie.service.StatusTransitService,
+      org.apache.oozie.service.PauseTransitService,
+      org.apache.oozie.service.GroupsService,
+      org.apache.oozie.service.ProxyUserService,
+      org.apache.oozie.service.XLogStreamingService,
+      org.apache.oozie.service.JobsConcurrencyService
+    </value>
+    <description>List of Oozie services</description>
+  </property>
+  <property>
+    <name>oozie.service.URIHandlerService.uri.handlers</name>
+    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
+    <description>
+      Lists the URI handlers supported for data availability checks.
+    </description>
+  </property>
+  <property>
+    <name>oozie.services.ext</name>
+    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
+    <description>
+      To add/replace services defined in 'oozie.services' with custom implementations.
+      Class names must be separated by commas.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.coord.push.check.requeue.interval</name>
+    <value>30000</value>
+    <description>
+      Command re-queue interval for push dependencies (in milliseconds).
+    </description>
+  </property>
+  <property>
+    <name>oozie.credentials.credentialclasses</name>
+    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+    <description>
+      Credential Class to be used for HCat.
+    </description>
+  </property>
+
+</configuration>
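Since oozie.base.url above is also the root of Oozie's REST API, a smoke test in the same spirit as the stack's other status checks can poll the v1 admin status endpoint. A sketch, assuming the default host and port from this file:

# Sketch: confirm Oozie reports NORMAL system mode via its v1 REST API.
import json
import urllib2

url = 'http://localhost:11000/oozie/v1/admin/status'  # default oozie.base.url
status = json.loads(urllib2.urlopen(url).read())
if status.get('systemMode') != 'NORMAL':
  raise Exception('Oozie is not in NORMAL mode: ' + str(status))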

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000..26d2789
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/OOZIE/metainfo.xml
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>4.0.0.2.0.6.0</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie</name>
+            </package>
+            <package>
+              <name>oozie-client</name>
+            </package>
+            <package>
+              <name>falcon</name>
+            </package>
+            <package>
+              <name>zip</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>extjs</name>
+            </package>
+            <package>
+              <name>libxml2-utils</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+      </osSpecifics>
+      <configuration-dependencies>
+        <config-type>oozie-env</config-type>
+        <config-type>oozie-site</config-type>
+        <config-type>oozie-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/configuration/pig-properties.xml
new file mode 100644
index 0000000..480be75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/configuration/pig-properties.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+# debug level, INFO is default
+debug=INFO
+
+# verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+# exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+# Enable insertion of information about script into hadoop job conf 
+pig.script.info.enabled=true
+
+# Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+
+# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+# This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+# the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+# Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+# Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false
+
+# Avoid pig failures when multiple jobs write to the same location
+pig.location.check.strict=false
+
+hcat.bin=/usr/bin/hcat
+
+    </value>
+  </property>
+
+</configuration>
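The content property above is written out verbatim as pig.properties on client hosts. A small sketch of reading the rendered file back into a dict, e.g. for a sanity check (/etc/pig/conf/pig.properties is the conventional location, assumed here):

# Sketch: parse a rendered pig.properties into a dict of settings.
props = {}
with open('/etc/pig/conf/pig.properties') as f:  # assumed location
  for line in f:
    line = line.strip()
    if not line or line.startswith('#'):
      continue
    key, _, value = line.partition('=')
    props[key.strip()] = value.strip()
print props.get('exectype')  # e.g. 'mapreduce'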

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/metainfo.xml
new file mode 100644
index 0000000..7ecf01b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/PIG/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.12.0.2.0.6.0</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000..4db6892
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/SQOOP/metainfo.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.4.2.0.6.0</version>
+      <requiredServices>
+        <service>GLUSTERFS</service>
+      </requiredServices>      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2
new file mode 100644
index 0000000..3a2af49
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000..671f328
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+<!-- GLUSTERFS properties -->
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>glusterfs:///mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>glusterfs:///mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+  <property>
+     <name>yarn.app.mapreduce.am.staging-dir</name>
+     <value>glusterfs:///user</value>
+     <description>
+       The staging dir used while submitting jobs.
+     </description>
+  </property>
+  <property>
+     <name>mapred.healthChecker.script.path</name>
+     <value>glusterfs:///mapred/jobstatus</value>
+   </property>
+  <property>
+     <name>mapred.job.tracker.history.completed.location</name>
+     <value>glusterfs:///mapred/history/done</value>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value>glusterfs:///mapred/system</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>glusterfs:///user</value>
+  </property>
+
+</configuration>
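Every data directory above deliberately uses the glusterfs scheme rather than hdfs. A quick sketch of the kind of sanity check a service script might run over such values (the property/value pairs are copied from this file):

# Sketch: verify GlusterFS-backed directory values use the glusterfs scheme.
from urlparse import urlparse

dirs = {
  'mapreduce.jobhistory.done-dir': 'glusterfs:///mr-history/done',
  'yarn.app.mapreduce.am.staging-dir': 'glusterfs:///user',
  'mapred.system.dir': 'glusterfs:///mapred/system',
}
for name, value in dirs.items():
  if urlparse(value).scheme != 'glusterfs':
    raise Exception(name + ' should use glusterfs://, got ' + value)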

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2
new file mode 100644
index 0000000..6abb71d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+<!-- GLUSTERFS properties -->
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>glusterfs:///mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>glusterfs:///mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+  <property>
+     <name>yarn.app.mapreduce.am.staging-dir</name>
+     <value>glusterfs:///user</value>
+     <description>
+       The staging dir used while submitting jobs.
+     </description>
+  </property>
+  <property>
+     <name>mapred.healthChecker.script.path</name>
+     <value>glusterfs:///mapred/jobstatus</value>
+   </property>
+  <property>
+     <name>mapred.job.tracker.history.completed.location</name>
+     <value>glusterfs:///mapred/history/done</value>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value>glusterfs:///mapred/system</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>glusterfs:///user</value>
+  </property>
+
+</configuration>


[5/7] AMBARI-7451 Fix 2.0.6.GlusterFS Stack on latest 1.7 build - refactor to make compliant with new architecture (Scott Creeley via eboyd)

Posted by eb...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..ad1df07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,502 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <description>Hive Java heap size</description>
+  </property>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used for the Hive Metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property require-input="true">
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+     thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <description>The service principal for the metastore thrift server. The special
+    string _HOST will be replaced automatically with the correct host name.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The Hive client authorization manager class name.
+    The user-defined authorization class should implement the interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable local filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>Whether the query should fail if the user asked for a sort-merge bucketed map-side join and it cannot be performed.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter to true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>false</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+    of buckets, a sort-merge join can be performed by setting this parameter to true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common
+      join into a mapjoin based on the input file size.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
+      the criteria for a sort-merge join.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Required to Enable the conversion of an SMB (Sort-Merge-Bucket) to a map-join SMB.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
+      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions in an n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter has no effect. However, if it
+      is on, and the sum of sizes for n-1 of the tables/partitions in an n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task). The default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+    <description>Reduce deduplication merges two RSs by moving the key/parts/reducer-num of the child RS to the parent RS.
+      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-MR job.
+      The optimization will be disabled if the number of reducers is less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <description>If hive.auto.convert.join is off, this parameter has no
+      effect. If it is on, and if there are map-join jobs followed by a map-reduce
+      job (e.g. a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
+      will use 10 reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>
+    Whether to enable automatic use of indexes
+    </description>
+  </property>
+
+  <property>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>Whether to use MR or Tez</description>
+  </property>
+
+  <property>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of post-execution hooks to be invoked for each statement.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of pre-execution hooks to be invoked for each statement.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of on-failure hooks to be invoked for each statement.</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush regardless of the memory pressure condition.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>1024</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of the average entry size is performed.</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash that are flushed when the memory threshold is exceeded.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.autogather</name>
+    <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.container.size</name>
+    <value>682</value>
+    <description>By default, Tez uses the java options from map tasks. Use this property to override that value. Assigned value must match value specified for mapreduce.map.child.java.opts.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.java.opts</name>
+    <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC</value>
+    <description>Java command line options for Tez. Must be assigned the same value as mapreduce.map.child.java.opts.</description>
+  </property>
+
+  <property>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true, Hive will answer a few queries like count(1) purely using stats
+      stored in the metastore. For basic stats collection, set the config hive.stats.autogather to true.
+      For more advanced stats collection, you need to run analyze table queries.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.limit.optimize.enable</name>
+    <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+  </property>
+
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.default.queues</name>
+    <value>default</value>
+    <description>A comma-separated list of queues configured for the cluster.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>The number of sessions for each queue named in the hive.server2.tez.default.queues.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.initialize.default.sessions</name>
+    <value>false</value>
+    <description>Enables a user to use HiveServer2 without enabling Tez for HiveServer2. Users may potentially want to run queries with Tez without a pool of sessions.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>Maximum number of transactions that can be fetched in one call to open_txns(). Increasing this will decrease the number of delta files created when streaming data into Hive. But it will also increase the number of open transactions at any given time, possibly impacting read performance.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>Time, in seconds, before a given compaction in working state is declared a failure and returned to the initiated state.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>Time in seconds between checks to see if any partitions need to be compacted. This should be kept high because each check for compaction requires many calls against the NameNode.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.port</name>
+    <value>10000</value>
+    <description>
+      TCP port number to listen on, default 10000.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>false</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its
+      clients. To support this, each instance of HiveServer2 currently uses
+      ZooKeeper to register itself, when it is brought up. JDBC/ODBC clients
+      should use the ZooKeeper ensemble: hive.zookeeper.quorum in their
+      connection string.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when
+      supporting dynamic service discovery.
+    </description>
+  </property>
+
+</configuration>
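One description above worth unpacking is hive.auto.convert.join.noconditionaltask.size: an n-way join is converted to an unconditional mapjoin when the n-1 smallest inputs together fit under the threshold, so only the largest table is streamed. A worked sketch of that arithmetic (the table sizes are invented for illustration):

# Sketch: the size test behind hive.auto.convert.join.noconditionaltask.size.
THRESHOLD = 1000000000  # bytes, the value configured above

def converts_to_mapjoin(table_sizes, threshold=THRESHOLD):
  # All inputs except the single largest must fit under the threshold.
  return sum(sorted(table_sizes)[:-1]) < threshold

print converts_to_mapjoin([5 * 10**9, 200 * 10**6, 50 * 10**6])  # True
print converts_to_mapjoin([5 * 10**9, 2 * 10**9, 50 * 10**6])    # False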

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.mysql.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
new file mode 100644
index 0000000..89ce15d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
@@ -0,0 +1,889 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These tables are not part of the JDO package, so if you regenerate this file you need to manually add this section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) not null,
+  HL_LOCK_TYPE char(1) not null,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
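
As a quick sanity check after applying this script (a sketch; the expected
values follow from the INSERT statements above), the seeded bookkeeping rows
can be verified from a MySQL session:

  -- schema version recorded as the last step of the script
  SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;
  -- transaction/lock/compaction counters, each seeded to 1
  SELECT NTXN_NEXT FROM NEXT_TXN_ID;
  SELECT NL_NEXT FROM NEXT_LOCK_ID;
  SELECT NCQ_NEXT FROM NEXT_COMPACTION_QUEUE_ID;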

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.oracle.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
new file mode 100644
index 0000000..6bd8df9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
@@ -0,0 +1,835 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE CHAR(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER VARCHAR2(128) NOT NULL,
+  TXN_HOST VARCHAR2(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE VARCHAR2(128) NOT NULL,
+  CTC_TABLE VARCHAR2(128),
+  CTC_PARTITION VARCHAR2(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
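+
+-- A minimal sketch of how this single-row counter table is meant to be used
+-- (illustrative only; the metastore allocates transaction IDs in its own
+-- transaction-handling code, not via this script): read the current value,
+-- then advance it in the same database transaction so concurrent callers
+-- never receive the same ID.
+--
+--   SELECT NTXN_NEXT FROM NEXT_TXN_ID;
+--   UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1;
+--   COMMIT;
+--
+-- NEXT_LOCK_ID and NEXT_COMPACTION_QUEUE_ID below follow the same pattern.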
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER VARCHAR2(128) NOT NULL,
+  HL_HOST VARCHAR2(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+);
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
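+
+-- The HL_TXNID index presumably exists so all locks tied to a transaction can
+-- be found quickly when it commits, aborts, or times out. A hypothetical
+-- cleanup statement of that shape (:txn_id is a bind variable, not part of
+-- this script):
+--
+--   DELETE FROM HIVE_LOCKS WHERE HL_TXNID = :txn_id;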
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE VARCHAR2(128) NOT NULL,
+  CQ_TABLE VARCHAR2(128) NOT NULL,
+  CQ_PARTITION VARCHAR2(767),
+  CQ_STATE CHAR(1) NOT NULL,
+  CQ_TYPE CHAR(1) NOT NULL,
+  CQ_WORKER_ID VARCHAR2(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS VARCHAR2(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
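+
+-- A quick way to confirm the recorded version after running this script
+-- (a sketch; tools such as Hive's schematool perform their own verification):
+--
+--   SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;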
+


[2/7] AMBARI-7451 Fix 2.0.6.GlusterFS Stack on latest 1.7 build - refactor to make compliant with new architecture (Scott Creeley via eboyd)

Posted by eb...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metrics.json
----------------------------------------------------------------------
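For orientation before the large file that follows: each top-level key in
metrics.json ("NODEMANAGER", "RESOURCEMANAGER", ...) holds metric definitions
grouped by source type. "ganglia" entries map an Ambari metric path to a
Ganglia time-series name; "jmx" entries map it to a JMX MBean attribute. The
two flags appear to declare how a metric may be queried: over a time range
("temporal") or as a single current value ("pointInTime"). A minimal sketch of
one entry, using names taken from the file itself:

  "metrics/jvm/gcCount": {
    "metric": "jvm.JvmMetrics.GcCount",
    "pointInTime": false,
    "temporal": true
  }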
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metrics.json
new file mode 100644
index 0000000..68efe9f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metrics.json
@@ -0,0 +1,2534 @@
+{
+  "NODEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsFailed": {
+            "metric": "mapred.ShuffleOutputsFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.metrics.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersCompleted": {
+            "metric": "yarn.ContainersCompleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersKilled": {
+            "metric": "yarn.ContainersKilled",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedGB": {
+            "metric": "yarn.AllocatedGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsOK": {
+            "metric": "mapred.ShuffleOutputsOK",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersFailed": {
+            "metric": "yarn.ContainersFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedContainers": {
+            "metric": "yarn.AllocatedContainers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersRunning": {
+            "metric": "yarn.ContainersRunning",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersLaunched": {
+            "metric": "yarn.ContainersLaunched",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AvailableGB": {
+            "metric": "yarn.AvailableGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleConnections": {
+            "metric": "mapred.ShuffleConnections",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersIniting": {
+            "metric": "yarn.ContainersIniting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputBytes": {
+            "metric": "mapred.ShuffleOutputBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.metrics.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsFailed": {
+            "metric": "mapred.ShuffleOutputsFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.metrics.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersCompleted": {
+            "metric": "yarn.ContainersCompleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersKilled": {
+            "metric": "yarn.ContainersKilled",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedGB": {
+            "metric": "yarn.AllocatedGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsOK": {
+            "metric": "mapred.ShuffleOutputsOK",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersFailed": {
+            "metric": "yarn.ContainersFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedContainers": {
+            "metric": "yarn.AllocatedContainers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersRunning": {
+            "metric": "yarn.ContainersRunning",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersLaunched": {
+            "metric": "yarn.ContainersLaunched",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AvailableGB": {
+            "metric": "yarn.AvailableGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleConnections": {
+            "metric": "mapred.ShuffleConnections",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersIniting": {
+            "metric": "yarn.ContainersIniting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputBytes": {
+            "metric": "mapred.ShuffleOutputBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.metrics.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ]
+  },
+  "RESOURCEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "yarn.ClusterMetrics.NumLostNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "yarn.ClusterMetrics.NumActiveNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisCopy": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memMaxM": {
+            "metric": "jvm.JvmMetrics.MemMaxM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountCopy": {
+            "metric": "jvm.JvmMetrics.GcCountCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
+            "pointInTime": false,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryMax":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/HeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryMax":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/runtime/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemMaxM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/unhealthyNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedVCores",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/decommissionedNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/startTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveApplications": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveApplications",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/nodeManagers": {
+            "metric": "Hadoop:service=ResourceManager,name=RMNMInfo.LiveNodeManagers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.CallQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedVCores",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsPending": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsPending",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsCompleted": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsCompleted",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveUsers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveUsers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsRunning": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_1440": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_1440",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableVCores",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/activeNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersReleased": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersReleased",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/lostNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingVCores",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "yarn.ClusterMetrics.NumLostNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "yarn.ClusterMetrics.NumActiveNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisCopy": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memMaxM": {
+            "metric": "jvm.JvmMetrics.MemMaxM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountCopy": {
+            "metric": "jvm.JvmMetrics.GcCountCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
+            "pointInTime": false,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryMax":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/HeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryMax":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true

<TRUNCATED>
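
A note on the metrics definitions above: each JSON key is the Ambari REST
property path a client asks for, and each "metric" value is the underlying
Ganglia or JMX metric name. For queue-scoped metrics the key embeds a
substitution expression, so the queue name captured by the regex in the
metric name is rewritten into a slash-separated path segment. A minimal
Python sketch of that resolution, assuming a Ganglia-style name (the sample
queue and the helper code are illustrative, not part of this patch):

    import re

    # Definition from the file: "yarn.QueueMetrics.Queue=(.+).AppsPending",
    # exposed as metrics/yarn/Queue/$1.replaceAll("([.])","/")/AppsPending
    definition = r"yarn\.QueueMetrics\.Queue=(.+)\.AppsPending"
    metric_name = "yarn.QueueMetrics.Queue=root.default.AppsPending"

    match = re.match(definition, metric_name)
    queue = match.group(1)                # "root.default"
    queue_path = queue.replace(".", "/")  # replaceAll("([.])","/")
    print("metrics/yarn/Queue/%s/AppsPending" % queue_path)
    # -> metrics/yarn/Queue/root/default/AppsPending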

[3/7] AMBARI-7451 Fix 2.0.6.GlusterFS Stack on latest 1.7 build - refactor to make compliant with new architecture (Scott Creeley via eboyd)

Posted by eb...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..261d872
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,124 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.2</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run
+      application masters, i.e. it controls the number of concurrently
+      running applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues' combined capacity should add up to at most their
+      parent queue's capacity.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue, i.e. change sub-queue
+      allocations.
+    </description>
+  </property>
+  
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By
+      default it is set to approximately the number of nodes in one rack,
+      which is 40.
+    </description>
+  </property>
+
+</configuration>
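
The capacity-scheduler.xml above ships a single "default" queue owning 100%
of the root queue's capacity. The invariant called out in the
yarn.scheduler.capacity.root.capacity description, that child capacities
must not exceed the parent's, is easy to sanity-check; here is a small
Python sketch under a hypothetical two-queue split (the queue names and the
helper function are illustrative only):

    # Child queue capacities should add up to the parent's capacity or less.
    def check_capacity(parent_capacity, child_capacities):
        total = sum(child_capacities.values())
        if total > parent_capacity:
            raise ValueError("children use %s%% of a %s%% parent"
                             % (total, parent_capacity))

    check_capacity(100, {"default": 100})              # this file's layout
    check_capacity(100, {"default": 60, "batch": 40})  # hypothetical split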

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/mapred-site.xml.2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/mapred-site.xml.2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/mapred-site.xml.2
new file mode 100644
index 0000000..6abb71d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/mapred-site.xml.2
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+<!-- GLUSTERFS properties -->
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>glusterfs:///mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>glusterfs:///mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>glusterfs:///user</value>
+    <description>
+      The staging dir used while submitting jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value>glusterfs:///mapred/jobstatus</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>glusterfs:///mapred/history/done</value>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value>glusterfs:///mapred/system</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>glusterfs:///user</value>
+  </property>
+
+</configuration>
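
Every override in mapred-site.xml.2 points a MapReduce staging or history
location at a glusterfs:/// URI rather than HDFS, which is the heart of the
GlusterFS stack refactor. The URI shape itself is ordinary; a quick look
with the Python standard library (the parse is generic, and wiring the
scheme into Hadoop, e.g. through an fs.glusterfs.impl setting, is assumed
to happen elsewhere in the stack):

    from urllib.parse import urlparse  # urlparse.urlparse on Python 2

    uri = urlparse("glusterfs:///mr-history/done")
    print(uri.scheme)  # "glusterfs": selects the GlusterFS filesystem
    print(uri.netloc)  # "": no explicit authority/volume in these values
    print(uri.path)    # "/mr-history/done"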

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-env.xml
new file mode 100644
index 0000000..60109c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-env.xml
@@ -0,0 +1,181 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <description>YARN Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <description>YARN PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <value>yarn</value>
+    <description>YARN User</description>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for all YARN components, as a numerical value in MB</description>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for the ResourceManager, as a numerical value in MB</description>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for the NodeManager, as a numerical value in MB</description>
+  </property>
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for the AppTimelineServer, as a numerical value in MB</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for the NameNode, as a numerical value in MB</description>
+  </property>
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN-specific heap sizes, use this parameter
+# and set it appropriately.
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max heapsize for the ResourceManager using a numerical value
+# in MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max heapsize for the NodeManager using a numerical value
+# in MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the max heapsize for the HistoryServer using a numerical value
+# in MB. For example, to specify a JVM option of -Xmx1024m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory and file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+    </value>
+  </property>
+</configuration>
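
The content property above is a jinja-style template: Ambari substitutes
the {{...}} placeholders when it writes yarn-env.sh, and the script then
turns the numeric heapsize into a JVM flag. Both steps sketched in Python
(a naive stand-in for Ambari's real templating, shown only to make the
data flow concrete):

    config = {"yarn_heapsize": "1024"}

    # Step 1: render the placeholder, as the template engine would.
    line = "YARN_HEAPSIZE={{yarn_heapsize}}".replace(
        "{{yarn_heapsize}}", config["yarn_heapsize"])
    print(line)                                 # YARN_HEAPSIZE=1024

    # Step 2: what yarn-env.sh then does with the value.
    print("-Xmx%sm" % config["yarn_heapsize"])  # -Xmx1024m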

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..893ccd8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,413 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <!-- ResourceManager -->
+
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+    <description>The address of the ResourceManager's resource tracker interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+    <description>The address of the scheduler interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+    <description>
+      The address of the applications manager interface in the
+      RM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+    <description>The address of the RM admin interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>2048</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Whether ACLs are enabled.</description>
+  </property>
+
+  <property>
+    <name>yarn.admin.acl</name>
+    <value></value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+  </property>
+
+  <!-- NodeManager -->
+
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+    <description>Classpath for typical applications.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio between virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.GlusterContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot
+      start with numbers</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/hadoop/yarn/log</value>
+    <description>
+      Where to store container logs. An application's localized log directory
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories
+      named container_{$contid}. Each container directory will contain the files
+      stderr, stdin, and syslog generated by that container.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/hadoop/yarn/local</value>
+    <description>
+      List of directories to store localized files in. An
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>
+      The interval, in milliseconds, for which the node manager
+      waits  between two cycles of monitoring its containers' memory usage.
+    </description>
+  </property>
+
+  <!--
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value>/etc/hadoop/conf/health_check_nodemanager</value>
+    <description>The health check script to run.</description>
+  </property>
+   -->
+
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+    <description>Frequency of running node health script.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+    <description>Script time out period.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log.retain-second</name>
+    <value>604800</value>
+    <description>
+      Time in seconds to retain user logs. Only applicable if
+      log aggregation is disabled.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+    <description>Whether to enable log aggregation. </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+    <description>
+      The remote log dir will be created at
+      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+    <description>
+      T-file compression types used to compress aggregated logs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's
+      DeletionService will delete the application's localized file directory
+      and log directory.
+
+      To diagnose YARN application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the
+      nodemanager in order for it to have an effect.
+
+      The roots of YARN applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see below), and the roots
+      of the YARN applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also below).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation.retain-seconds</name>
+    <value>2592000</value>
+    <description>
+      How long to keep aggregation logs before deleting them. -1 disables.
+      Be careful: if you set this too small, you will spam the name node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+    <description>
+      Environment variables that should be forwarded from the NodeManager's
+      environment to the container's.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
+    <value>0.25</value>
+    <description>
+      The minimum fraction of disks that must be healthy for the nodemanager
+      to launch new containers. This corresponds to both
+      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.
+      if fewer healthy local-dirs (or log-dirs) are available,
+      then new containers will not be launched on this node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It's a global
+      setting for all application masters. Each application master can specify
+      its individual maximum number of application attempts via the API, but the
+      individual number cannot be more than the global upper bound. If it is,
+      the resourcemanager will override it. The default number is set to 2, to
+      allow at least one retry for the AM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+    <description>
+      The address of the RM web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>false</value>
+    <description>
+      Whether virtual memory limits will be enforced for containers.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.enabled</name>
+    <value>true</value>
+    <description>Indicates to clients whether the timeline service is enabled.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value>
+    <description>
+      Store class name for timeline store
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description>
+      Store class name for history store, defaulting to file system store
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/mnt/glusterfs/hadoop/yarn/timeline</value>
+    <description>
+      Store file name for leveldb timeline store
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.webapp.address</name>
+    <value>0.0.0.0:8188</value>
+    <description>
+      The http address of the timeline service web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.webapp.https.address</name>
+    <value>0.0.0.0:8190</value>
+    <description>
+      The https address of the timeline service web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.address</name>
+    <value>0.0.0.0:10200</value>
+    <description>
+      The default address for the timeline server to start
+      the RPC server.
+    </description>
+  </property>
+  <property>
+    <description>Enable age off of timeline store data.</description>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <value>true</value>
+  </property>
+  <property>
+    <description>Time to live for timeline store data in milliseconds.</description>
+    <name>yarn.timeline-service.ttl-ms</name>
+    <value>2678400000</value>
+  </property>
+  <property>
+    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
+    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
+    <value>300000</value>
+  </property>
+</configuration>
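
Each property in the yarn-site.xml above follows Hadoop's standard name/value/description layout. A small stdlib-only sketch (not Ambari code) of reading such a file into a dict for inspection; the file path is an assumption:

import xml.etree.ElementTree as ET

def load_properties(path):
    # Collect <name>/<value> pairs from a Hadoop-style configuration file.
    props = {}
    for prop in ET.parse(path).getroot().findall("property"):
        props[prop.findtext("name")] = prop.findtext("value") or ""
    return props

props = load_properties("yarn-site.xml")
print(props.get("yarn.timeline-service.webapp.address"))  # 0.0.0.0:8188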

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metainfo.xml
new file mode 100644
index 0000000..64fab13
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/metainfo.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.2.0.2.0.6.0</version>
+      <components>
+
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <displayName>App Timeline Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/application_timeline_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>GLUSTERFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+    <service>
+      <name>MAPREDUCE2</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.2.0.2.0.6.0</version>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+          </auto-deploy>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
+</metainfo>
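
Each <commandScript> above points at a Python script that Ambari's agent invokes with install/start/stop/status commands. A hedged sketch of the usual shape of such a script - this is not the application_timeline_server.py from this commit, and the yarn-daemon.sh command and "yarn" user are illustrative assumptions:

from resource_management import Script, Execute

class ApplicationTimelineServer(Script):
  def install(self, env):
    # Installs the packages declared under <osSpecifics> (hadoop-yarn, ...).
    self.install_packages(env)

  def start(self, env):
    # Illustrative command and user; the real script builds these from params.
    Execute("yarn-daemon.sh start timelineserver", user="yarn")

  def stop(self, env):
    Execute("yarn-daemon.sh stop timelineserver", user="yarn")

  def status(self, env):
    pass  # the real script checks the daemon's pid file

if __name__ == "__main__":
  ApplicationTimelineServer().execute()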


[7/7] git commit: AMBARI-7451 Fix 2.0.6.GlusterFS Stack on latest 1.7 build - refactor to make compliant with new architecture (Scott Creeley via eboyd)

Posted by eb...@apache.org.
AMBARI-7451 Fix 2.0.6.GlusterFS Stack on latest 1.7 build - refactor to make compliant with new architecture (Scott Creeley via eboyd)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3f7fdf50
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3f7fdf50
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3f7fdf50

Branch: refs/heads/trunk
Commit: 3f7fdf5018d9f01cd6200a4a31dfccb3c8450559
Parents: 0680d1e
Author: root <ro...@ambari.rhs>
Authored: Tue Sep 23 16:33:51 2014 -0400
Committer: root <ro...@ambari.rhs>
Committed: Tue Sep 23 16:34:09 2014 -0400

----------------------------------------------------------------------
 .../stacks/HDP/2.0.6.GlusterFS/metainfo.xml     |    6 +-
 .../HDP/2.0.6.GlusterFS/repos/repoinfo.xml      |   39 +
 .../HDP/2.0.6.GlusterFS/role_command_order.json |   30 +-
 .../GLUSTERFS/configuration/core-site.xml       |  272 +-
 .../services/GLUSTERFS/configuration/global.xml |   35 -
 .../GLUSTERFS/configuration/hadoop-env.xml      |  209 +
 .../services/GLUSTERFS/metainfo.xml             |   36 +-
 .../services/HBASE/configuration/hbase-site.xml |  370 +
 .../2.0.6.GlusterFS/services/HBASE/metainfo.xml |   34 +
 .../2.0.6.GlusterFS/services/HDFS/metainfo.xml  |   46 +-
 .../2.0.6.GlusterFS/services/HDFS/metrics.json  | 7840 ------------------
 .../services/HIVE/configuration/hive-site.xml   |  502 ++
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 ++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 ++++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 +
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../2.0.6.GlusterFS/services/HIVE/metainfo.xml  |  100 +
 .../services/OOZIE/configuration/oozie-site.xml |  313 +
 .../2.0.6.GlusterFS/services/OOZIE/metainfo.xml |   78 +
 .../PIG/configuration/pig-properties.xml        |   91 +
 .../2.0.6.GlusterFS/services/PIG/metainfo.xml   |   27 +
 .../2.0.6.GlusterFS/services/SQOOP/metainfo.xml |   32 +
 .../YARN/configuration-mapred/core-site.xml.2   |   20 +
 .../YARN/configuration-mapred/mapred-site.xml   |   88 +
 .../YARN/configuration-mapred/mapred-site.xml.2 |   68 +
 .../YARN/configuration/capacity-scheduler.xml   |  124 +
 .../YARN/configuration/mapred-site.xml.2        |   68 +
 .../services/YARN/configuration/yarn-env.xml    |  181 +
 .../services/YARN/configuration/yarn-site.xml   |  413 +
 .../2.0.6.GlusterFS/services/YARN/metainfo.xml  |  143 +
 .../2.0.6.GlusterFS/services/YARN/metrics.json  | 2534 ++++++
 .../files/validateYarnComponentStatus.py        |  170 +
 .../services/YARN/package/scripts/__init__.py   |   20 +
 .../scripts/application_timeline_server.py      |   55 +
 .../YARN/package/scripts/historyserver.py       |   53 +
 .../package/scripts/mapred_service_check.py     |   73 +
 .../YARN/package/scripts/mapreduce2_client.py   |   42 +
 .../YARN/package/scripts/nodemanager.py         |   59 +
 .../services/YARN/package/scripts/params.py     |  143 +
 .../YARN/package/scripts/resourcemanager.py     |   90 +
 .../services/YARN/package/scripts/service.py    |   62 +
 .../YARN/package/scripts/service_check.py       |   67 +
 .../YARN/package/scripts/status_params.py       |   35 +
 .../services/YARN/package/scripts/yarn.py       |  163 +
 .../YARN/package/scripts/yarn_client.py         |   42 +
 .../package/templates/container-executor.cfg.j2 |   22 +
 .../YARN/package/templates/mapreduce.conf.j2    |   17 +
 .../YARN/package/templates/yarn-env.sh.j2       |  128 +
 .../YARN/package/templates/yarn.conf.j2         |   17 +
 .../services/ZOOKEEPER/metainfo.xml             |   28 +
 51 files changed, 10240 insertions(+), 8210 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/metainfo.xml
index e1b0ec9..b7d8766 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/metainfo.xml
@@ -16,8 +16,8 @@
    limitations under the License.
 -->
 <metainfo>
-  <versions>
+    <versions>
 	  <active>false</active>
-  </versions>
-  <extends>2.0.6</extends>
+    </versions>
+    <extends>2.0.6</extends>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/repos/repoinfo.xml
index 296dcd5..f86b40a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/repos/repoinfo.xml
@@ -16,12 +16,41 @@
    limitations under the License.
 -->
 <reposinfo>
+  <os type="centos6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.6.GlusterFS</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>    
+  </os>
+  <os type="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
+      <repoid>HDP-2.0.6.GlusterFS</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>    
+  </os>
   <os type="redhat6">
     <repo>
       <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6.GlusterFS</repoid>
       <reponame>HDP</reponame>
     </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>     
   </os>
   <os type="redhat5">
     <repo>
@@ -29,12 +58,22 @@
       <repoid>HDP-2.0.6.GlusterFS</repoid>
       <reponame>HDP</reponame>
     </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>     
   </os>
   <os type="suse11">
     <repo>
       <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6.GlusterFS</repoid>
       <reponame>HDP</reponame>
     </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/suse11</baseurl>
+      <repoid>HDP-UTILS-1.1.0.17</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
   </os>
 </reposinfo>
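
Each <repo> entry above supplies the fields for a package-manager repo definition on the managed hosts. Roughly the yum stanza the new centos6 HDP-UTILS entry corresponds to, built with a small Python formatter (the enabled/gpgcheck flags are assumptions for the sketch):

# Field values copied from the centos6 HDP-UTILS entry above.
repo = {
    "repoid": "HDP-UTILS-1.1.0.17",
    "reponame": "HDP-UTILS",
    "baseurl": "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6",
}
stanza = (
    "[{repoid}]\n"
    "name={reponame}\n"
    "baseurl={baseurl}\n"
    "enabled=1\n"
    "gpgcheck=0\n"
).format(**repo)
print(stanza)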

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
index 752f45b..82cbd79 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
@@ -5,6 +5,11 @@
     "_comment" : "dependencies for all cases",
     "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
         "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+    "NIMBUS-START" : ["ZOOKEEPER_SERVER-START"],
+    "SUPERVISOR-START" : ["NIMBUS-START"],
+    "STORM_UI_SERVER-START" : ["NIMBUS-START"],
+    "DRPC_SERVER-START" : ["NIMBUS-START"],
+    "STORM_REST_API-START" : ["NIMBUS-START", "STORM_UI_SERVER-START", "SUPERVISOR-START", "DRPC_SERVER-START"],
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
@@ -20,17 +25,18 @@
         "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
         "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
     "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START",
+        "DRPC_SERVER-START"],
     "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"]
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
   "optional_glusterfs": {
@@ -46,6 +52,9 @@
     "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
     "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
     "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
     "HIVE_SERVER-START": ["DATANODE-START"],
     "WEBHCAT_SERVER-START": ["DATANODE-START"],
     "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
@@ -58,16 +67,15 @@
     "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"],
     "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"]
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"]
   },
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
-    "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
+    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
+    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
index afeada6..553b48d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -22,280 +22,28 @@
 
 <configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
- <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value></value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
 <!-- file system properties -->
 
   <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>glusterfs:///</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-  
-  <property>
-<name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>glusterfs:///</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-</property>
-
-<property>
-<name>gluster.daemon.user</name>
-<value>yarn</value>
-<description>GlusterFS Daemon user</description>
-</property>
-
-<property>
-<name>fs.AbstractFileSystem.glusterfs.impl</name>
-<value>org.apache.hadoop.fs.local.GlusterFs</value>
-<description>GlusterFS Abstract Filesystem declaration</description>
-</property>
-
-<property>
-<name>fs.glusterfs.impl</name>
-<value>org.apache.hadoop.fs.glusterfs.GlusterFileSystem</value>
-  <description>GlusterFS fs impl</description>
-</property>
-
-<property>
-<name>fs.glusterfs.volname</name>
-<value>HadoopVol</value>
-<description>GlusterFS volume name</description>
-</property>
-
-<property>
-<name>fs.glusterfs.mount</name>
-<value>/mnt/glusterfs</value>
-<description>GlusterFS mount point</description>
-</property>
-
-<property>
-<name>fs.glusterfs.getfattrcmd</name>
-<value>sudo getfattr -m . -n trusted.glusterfs.pathinfo</value>
-<description>GlusterFS getfattr command</description>
-</property>
-
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directoires then teh edits is
-        replicated in all of the directoires for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
+    <name>fs.AbstractFileSystem.glusterfs.impl</name>
+    <value>org.apache.hadoop.fs.local.GlusterFs</value>
   </property>
 
   <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
+    <name>fs.glusterfs.impl</name>
+    <value>org.apache.hadoop.fs.glusterfs.GlusterFileSystem</value>
   </property>
 
-  <!-- ipc properties: copied from kryptonite configuration -->
   <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
+    <name>fs.glusterfs.volumes</name>
+    <description>The name of the gluster volume(s) you would like Hadoop to use. Values should be separated by commas, e.g. gv0,gv1</description>
+    <value>gv0</value>
   </property>
 
   <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value></value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-RULE:[2:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
-DEFAULT
-    </description>
+    <name>fs.glusterfs.volume.fuse.gv0</name>
+    <description>The mount point that corresponds to the fs.glusterfs.volumes value</description>
+    <value>/mnt/gv0</value>
   </property>
 
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
 </configuration>
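
The two new properties above follow a naming convention: every volume listed in fs.glusterfs.volumes gets a matching fs.glusterfs.volume.fuse.<volume> property holding its FUSE mount point. A tiny sketch of that mapping (gv1 and the /mnt/<volume> layout are illustrative assumptions; the stack default above uses only gv0):

volumes = "gv0,gv1"  # comma-separated, as in fs.glusterfs.volumes
for vol in (v.strip() for v in volumes.split(",")):
    # Property name derived per volume; mount point layout assumed.
    print("fs.glusterfs.volume.fuse.%s -> /mnt/%s" % (vol, vol))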

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/global.xml
deleted file mode 100644
index bb1f845..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/global.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
- <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..6c6d4c1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
@@ -0,0 +1,209 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>glusterfs:///localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value>glusterfs:///localhost:8020</value>
+    <description>The name of the default file system.  Either the
+         literal string "local" or a host:port for NDFS.</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+ <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>glusterfs_user</name>
+    <value>root</value>
+    <description></description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode host.</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User to run HDFS as</description>
+  </property>
+  <!--
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+  -->
+    <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
index 37069e5..2f2c640 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
@@ -20,6 +20,7 @@
   <services>
     <service>
       <name>GLUSTERFS</name>
+      <displayName>GLUSTERFS</displayName>
       <comment>An Hadoop Compatible File System</comment>
       <version>3.4.0</version>
       <components>
@@ -31,26 +32,31 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>global.xml</fileName>
-              <dictionaryName>global</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>        
-          </configFiles>          
         </component>
       </components>
+<!--
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>glusterfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+-->
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
 
       <configuration-dependencies>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <!--<config-type>hdfs-site</config-type>-->
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000..cf8ddd3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,370 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>glusterfs:///hbase</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration, or else all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>60000</value>
+    <description>The port the HBase Master should bind to.</description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/hadoop/hbase</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as local storage
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>60010</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>60030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>60</value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>86400000</value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>2</value>
+    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing
+    runaway memstore during spikes in update traffic.  Without an
+    upper-bound, memstore fills such that when it flushes the
+    resultant flush files take a long time to compact or split, or,
+    worse, we OOME.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>30000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since a single KeyValue cannot be split, this avoids a region
+    becoming unsplittable because one entry is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles files as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+        Set to 0 to disable but it's not recommended.
+    </description>
+  </property>
+
+  <!-- The following properties configure authentication information for
+       HBase processes when using Kerberos security.  There are no default
+       values, included here for documentation purposes -->
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HMaster server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HMaster process.  The principal name should
+    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
+    portion, it will be replaced with the actual hostname of the running
+    instance.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HRegionServer server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HRegionServer process.  The principal name
+    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
+    hostname portion, it will be replaced with the actual hostname of the
+    running instance.  An entry for this principal must exist in the file
+    specified in hbase.regionserver.keytab.file
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication), and 'kerberos'.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file paths are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+</configuration>
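
A few of the memstore settings above interact: per region, writes block once
a memstore reaches hbase.hregion.memstore.block.multiplier times
hbase.hregion.memstore.flush.size, while region-server-wide the memstores are
flushed from the 0.4 upper limit of the heap down to the 0.38 lower limit.
The arithmetic with the values from this file (the 8 GiB heap is an
assumption for illustration; the real -Xmx comes from hbase-env):

heap_bytes = 8 * 1024 ** 3    # assumed region server heap, for illustration only
flush_size = 134217728        # hbase.hregion.memstore.flush.size (128 MiB)
multiplier = 2                # hbase.hregion.memstore.block.multiplier
upper, lower = 0.40, 0.38     # global memstore upper/lower limits

print('per-region blocking threshold: %d MiB' % (multiplier * flush_size / 2 ** 20))
print('global memstore ceiling:       %d MiB' % (upper * heap_bytes / 2 ** 20))
print('flush-until mark:              %d MiB' % (lower * heap_bytes / 2 ** 20))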

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..8580abd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <comment>Non-relational distributed database</comment>
+      <version>0.96.1.2.0.6.1</version>
+      
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>GLUSTERFS</service>
+      </requiredServices>      
+    </service>
+  </services>
+</metainfo>
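
The schema-2.0 metainfo above deliberately carries no components or command
scripts of its own; it pins the HBase version and declares the service
dependencies, leaving the rest to be inherited from the parent stack. A quick
illustrative check (the file path is an assumption) that the dependencies are
declared:

import xml.etree.ElementTree as ET

# Run from the HBASE service directory; the path is an assumption.
root = ET.parse('metainfo.xml').getroot()
deps = [s.text for s in
        root.findall('./services/service/requiredServices/service')]
assert 'ZOOKEEPER' in deps and 'GLUSTERFS' in deps, deps
print('HBASE requires: %s' % ', '.join(deps))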

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metainfo.xml
index 9768a1f..c133ea4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metainfo.xml
@@ -19,51 +19,9 @@
   <schemaVersion>2.0</schemaVersion>
   <services>
     <service>
-      <name>GLUSTERFS</name>
-      <comment>An Hadoop Compatible File System</comment>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
       <version>2.2.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>GLUSTERFS_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/glusterfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>global.xml</fileName>
-              <dictionaryName>global</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>hdfs-site.xml</fileName>
-              <dictionaryName>hdfs-site</dictionaryName>
-            </configFile>                    
-          </configFiles>           
-        </component>
-      </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-site</config-type>
-      </configuration-dependencies>
-
     </service>
   </services>
 </metainfo>
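
With the embedded GLUSTERFS definition removed, the HDFS entry now keeps only
its name, comment and version; components, command scripts and configuration
dependencies come from the common HDP 2.0.6 definition via stack inheritance.
The same kind of illustrative check as above (path assumed) confirms nothing
service-specific is left:

import xml.etree.ElementTree as ET

root = ET.parse('metainfo.xml').getroot()   # run from the HDFS service directory
svc = root.find('./services/service')
print(svc.find('name').text)                # HDFS
print(svc.find('components') is None)       # True: inherited from the base stack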


http://git-wip-us.apache.org/repos/asf/ambari/blob/3f7fdf50/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metrics.json
deleted file mode 100644
index 53996cb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/metrics.json
+++ /dev/null
@@ -1,7840 +0,0 @@
-{
-  "NAMENODE": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "dfs.FSNamesystem.CapacityTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "dfs.FSNamesystem.CapacityRemaining",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
-            "pointInTime": false,
-            "temporal": true
-          },           
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM":{
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Free": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Total": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryMax": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryMax": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Threads": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NameDirStatuses": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalBlocks": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "dfs.FSNamesystem.CapacityTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "dfs.FSNamesystem.CapacityRemaining",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM":{
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/runtime/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namen

<TRUNCATED>